aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-03-16 19:29:25 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-03-16 19:29:25 -0400
commit7a6362800cb7d1d618a697a650c7aaed3eb39320 (patch)
tree087f9bc6c13ef1fad4b392c5cf9325cd28fa8523 /drivers
parent6445ced8670f37cfc2c5e24a9de9b413dbfc788d (diff)
parentceda86a108671294052cbf51660097b6534672f5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1480 commits) bonding: enable netpoll without checking link status xfrm: Refcount destination entry on xfrm_lookup net: introduce rx_handler results and logic around that bonding: get rid of IFF_SLAVE_INACTIVE netdev->priv_flag bonding: wrap slave state work net: get rid of multiple bond-related netdevice->priv_flags bonding: register slave pointer for rx_handler be2net: Bump up the version number be2net: Copyright notice change. Update to Emulex instead of ServerEngines e1000e: fix kconfig for crc32 dependency netfilter ebtables: fix xt_AUDIT to work with ebtables xen network backend driver bonding: Improve syslog message at device creation time bonding: Call netif_carrier_off after register_netdevice bonding: Incorrect TX queue offset net_sched: fix ip_tos2prio xfrm: fix __xfrm_route_forward() be2net: Fix UDP packet detected status in RX compl Phonet: fix aligned-mode pipe socket buffer header reserve netxen: support for GbE port settings ... Fix up conflicts in drivers/staging/brcm80211/brcmsmac/wl_mac80211.c with the staging updates.
Diffstat (limited to 'drivers')
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/bluetooth/ath3k.c287
-rw-r--r--drivers/bluetooth/btusb.c13
-rw-r--r--drivers/bluetooth/hci_ldisc.c1
-rw-r--r--drivers/infiniband/core/addr.c31
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c22
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c22
-rw-r--r--drivers/infiniband/hw/nes/nes.c3
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c8
-rw-r--r--drivers/md/dm-log-userspace-transfer.c2
-rw-r--r--drivers/net/Kconfig90
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/atl1c/atl1c_hw.c15
-rw-r--r--drivers/net/atl1c/atl1c_hw.h43
-rw-r--r--drivers/net/atl1c/atl1c_main.c5
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c12
-rw-r--r--drivers/net/atl1e/atl1e_hw.c34
-rw-r--r--drivers/net/atl1e/atl1e_hw.h111
-rw-r--r--drivers/net/atl1e/atl1e_main.c10
-rw-r--r--drivers/net/atlx/atl1.c77
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/ax88796.c810
-rw-r--r--drivers/net/benet/be.h55
-rw-r--r--drivers/net/benet/be_cmds.c202
-rw-r--r--drivers/net/benet/be_cmds.h96
-rw-r--r--drivers/net/benet/be_ethtool.c87
-rw-r--r--drivers/net/benet/be_hw.h110
-rw-r--r--drivers/net/benet/be_main.c620
-rw-r--r--drivers/net/bna/bnad.c108
-rw-r--r--drivers/net/bna/bnad.h2
-rw-r--r--drivers/net/bnx2.c16
-rw-r--r--drivers/net/bnx2.h6
-rw-r--r--drivers/net/bnx2x/bnx2x.h35
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c70
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h6
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c137
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h5
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c58
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h114
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c2527
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h34
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c600
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/bonding/Makefile3
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c638
-rw-r--r--drivers/net/bonding/bond_procfs.c275
-rw-r--r--drivers/net/bonding/bond_sysfs.c23
-rw-r--r--drivers/net/bonding/bonding.h111
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/c_can/Kconfig15
-rw-r--r--drivers/net/can/c_can/Makefile8
-rw-r--r--drivers/net/can/c_can/c_can.c1158
-rw-r--r--drivers/net/can/c_can/c_can.h86
-rw-r--r--drivers/net/can/c_can/c_can_platform.c215
-rw-r--r--drivers/net/can/usb/esd_usb2.c6
-rw-r--r--drivers/net/cnic.c209
-rw-r--r--drivers/net/cnic.h2
-rw-r--r--drivers/net/cnic_if.h8
-rw-r--r--drivers/net/cs89x0.c19
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c5
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c1
-rw-r--r--drivers/net/davinci_emac.c2
-rw-r--r--drivers/net/dm9000.c7
-rw-r--r--drivers/net/e1000e/defines.h1
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/ethtool.c92
-rw-r--r--drivers/net/e1000e/hw.h5
-rw-r--r--drivers/net/e1000e/ich8lan.c48
-rw-r--r--drivers/net/e1000e/lib.c4
-rw-r--r--drivers/net/e1000e/netdev.c129
-rw-r--r--drivers/net/e1000e/phy.c8
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/enic.h11
-rw-r--r--drivers/net/enic/enic_dev.c221
-rw-r--r--drivers/net/enic/enic_dev.h41
-rw-r--r--drivers/net/enic/enic_main.c326
-rw-r--r--drivers/net/enic/vnic_dev.c26
-rw-r--r--drivers/net/enic/vnic_dev.h8
-rw-r--r--drivers/net/enic/vnic_devcmd.h38
-rw-r--r--drivers/net/enic/vnic_rq.h5
-rw-r--r--drivers/net/eql.c10
-rw-r--r--drivers/net/fec.c650
-rw-r--r--drivers/net/forcedeth.c8
-rw-r--r--drivers/net/ftmac100.c1198
-rw-r--r--drivers/net/ftmac100.h180
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/igb/e1000_82575.c296
-rw-r--r--drivers/net/igb/e1000_82575.h1
-rw-r--r--drivers/net/igb/e1000_defines.h52
-rw-r--r--drivers/net/igb/e1000_hw.h9
-rw-r--r--drivers/net/igb/e1000_mbx.c38
-rw-r--r--drivers/net/igb/e1000_nvm.c64
-rw-r--r--drivers/net/igb/e1000_nvm.h1
-rw-r--r--drivers/net/igb/e1000_regs.h27
-rw-r--r--drivers/net/igb/igb.h8
-rw-r--r--drivers/net/igb/igb_ethtool.c30
-rw-r--r--drivers/net/igb/igb_main.c232
-rw-r--r--drivers/net/igbvf/ethtool.c6
-rw-r--r--drivers/net/igbvf/igbvf.h3
-rw-r--r--drivers/net/igbvf/netdev.c63
-rw-r--r--drivers/net/ipg.c4
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c39
-rw-r--r--drivers/net/ixgb/ixgb_main.c54
-rw-r--r--drivers/net/ixgbe/ixgbe.h16
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c102
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c228
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c947
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c160
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h12
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c138
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h25
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c176
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h29
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c429
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c57
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c103
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c481
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c37
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c594
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h7
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c116
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h65
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c43
-rw-r--r--drivers/net/ixgbevf/defines.h2
-rw-r--r--drivers/net/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h1
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c98
-rw-r--r--drivers/net/ixgbevf/regs.h2
-rw-r--r--drivers/net/jme.c306
-rw-r--r--drivers/net/jme.h87
-rw-r--r--drivers/net/loopback.c9
-rw-r--r--drivers/net/macvlan.c14
-rw-r--r--drivers/net/macvtap.c18
-rw-r--r--drivers/net/mii.c14
-rw-r--r--drivers/net/mv643xx_eth.c74
-rw-r--r--drivers/net/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/netxen/netxen_nic.h6
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c15
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c58
-rw-r--r--drivers/net/netxen/netxen_nic_main.c3
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/micrel.c24
-rw-r--r--drivers/net/phy/phy.c8
-rw-r--r--drivers/net/ppp_generic.c148
-rw-r--r--drivers/net/pptp.c45
-rw-r--r--drivers/net/qla3xxx.c10
-rw-r--r--drivers/net/qlcnic/qlcnic.h5
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c15
-rw-r--r--drivers/net/r8169.c272
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sfc/efx.c86
-rw-r--r--drivers/net/sfc/efx.h19
-rw-r--r--drivers/net/sfc/ethtool.c37
-rw-r--r--drivers/net/sfc/falcon.c22
-rw-r--r--drivers/net/sfc/falcon_boards.c2
-rw-r--r--drivers/net/sfc/falcon_xmac.c2
-rw-r--r--drivers/net/sfc/filter.c117
-rw-r--r--drivers/net/sfc/io.h15
-rw-r--r--drivers/net/sfc/mcdi.c32
-rw-r--r--drivers/net/sfc/mcdi.h4
-rw-r--r--drivers/net/sfc/mcdi_mac.c2
-rw-r--r--drivers/net/sfc/mcdi_pcol.h2
-rw-r--r--drivers/net/sfc/mcdi_phy.c2
-rw-r--r--drivers/net/sfc/mdio_10g.c34
-rw-r--r--drivers/net/sfc/mdio_10g.h5
-rw-r--r--drivers/net/sfc/mtd.c2
-rw-r--r--drivers/net/sfc/net_driver.h83
-rw-r--r--drivers/net/sfc/nic.c73
-rw-r--r--drivers/net/sfc/nic.h9
-rw-r--r--drivers/net/sfc/phy.h2
-rw-r--r--drivers/net/sfc/qt202x_phy.c2
-rw-r--r--drivers/net/sfc/regs.h8
-rw-r--r--drivers/net/sfc/rx.c144
-rw-r--r--drivers/net/sfc/selftest.c4
-rw-r--r--drivers/net/sfc/selftest.h2
-rw-r--r--drivers/net/sfc/siena.c24
-rw-r--r--drivers/net/sfc/spi.h2
-rw-r--r--drivers/net/sfc/tenxpress.c4
-rw-r--r--drivers/net/sfc/tx.c92
-rw-r--r--drivers/net/sfc/txc43128_phy.c4
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sh_eth.c737
-rw-r--r--drivers/net/sh_eth.h654
-rw-r--r--drivers/net/sis900.c4
-rw-r--r--drivers/net/sky2.c2
-rw-r--r--drivers/net/smc91x.c13
-rw-r--r--drivers/net/smsc911x.c4
-rw-r--r--drivers/net/sungem.c58
-rw-r--r--drivers/net/sungem.h1
-rw-r--r--drivers/net/tg3.c335
-rw-r--r--drivers/net/tg3.h13
-rw-r--r--drivers/net/tlan.c3840
-rw-r--r--drivers/net/tlan.h192
-rw-r--r--drivers/net/tun.c85
-rw-r--r--drivers/net/typhoon.c3
-rw-r--r--drivers/net/usb/cdc-phonet.c10
-rw-r--r--drivers/net/veth.c12
-rw-r--r--drivers/net/via-velocity.c9
-rw-r--r--drivers/net/via-velocity.h8
-rw-r--r--drivers/net/vxge/vxge-config.c32
-rw-r--r--drivers/net/vxge/vxge-config.h10
-rw-r--r--drivers/net/vxge/vxge-main.c234
-rw-r--r--drivers/net/vxge/vxge-main.h23
-rw-r--r--drivers/net/vxge/vxge-traffic.c116
-rw-r--r--drivers/net/vxge/vxge-traffic.h14
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile5
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/at76c50x-usb.c10
-rw-r--r--drivers/net/wireless/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig4
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c8
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig11
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h40
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c176
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h17
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c48
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h28
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c94
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c46
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h15
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h107
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c112
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1143
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h157
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c93
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c494
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c45
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c169
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c78
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h80
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c170
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c485
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c103
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c754
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c177
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h22
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c717
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c308
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c15
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h28
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h25
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h20
-rw-r--r--drivers/net/wireless/ath/key.c5
-rw-r--r--drivers/net/wireless/ath/regd.c7
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/main.c6
-rw-r--r--drivers/net/wireless/b43/phy_n.c207
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c1209
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h52
-rw-r--r--drivers/net/wireless/b43/xmit.c75
-rw-r--r--drivers/net/wireless/b43/xmit.h6
-rw-r--r--drivers/net/wireless/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h2
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig116
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile25
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c)11
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h)4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-fh.h)5
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-hw.h)9
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.c)31
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.h)2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-rs.c)41
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945.c)267
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945.h)12
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.c967
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-legacy.h)30
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-hw.h)26
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c74
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1260
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2870
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)177
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1369
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c (renamed from drivers/net/wireless/iwlwifi/iwl-4965.c)815
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-commands.h3405
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2674
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h646
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-csr.h422
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1467
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1426
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c45
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h270
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c561
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h181
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c188
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-prph.h523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c302
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c625
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h92
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c816
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c660
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c (renamed from drivers/net/wireless/iwlwifi/iwl3945-base.c)566
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3632
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig132
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c560
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c59
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c539
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c101
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c582
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h130
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c166
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c119
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h90
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c199
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.c662
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c880
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c54
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c78
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c7
-rw-r--r--drivers/net/wireless/libertas/cfg.c6
-rw-r--r--drivers/net/wireless/libertas/cmd.c10
-rw-r--r--drivers/net/wireless/libertas/dev.h2
-rw-r--r--drivers/net/wireless/libertas/host.h2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c368
-rw-r--r--drivers/net/wireless/libertas/main.c77
-rw-r--r--drivers/net/wireless/libertas/mesh.c11
-rw-r--r--drivers/net/wireless/libertas_tf/main.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c8
-rw-r--r--drivers/net/wireless/mwl8k.c516
-rw-r--r--drivers/net/wireless/orinoco/scan.c5
-rw-r--r--drivers/net/wireless/p54/Kconfig5
-rw-r--r--drivers/net/wireless/p54/eeprom.c211
-rw-r--r--drivers/net/wireless/p54/eeprom.h7
-rw-r--r--drivers/net/wireless/p54/fwio.c21
-rw-r--r--drivers/net/wireless/p54/lmac.h3
-rw-r--r--drivers/net/wireless/p54/main.c61
-rw-r--r--drivers/net/wireless/p54/p54.h7
-rw-r--r--drivers/net/wireless/p54/p54spi_eeprom.h9
-rw-r--r--drivers/net/wireless/p54/txrx.c19
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig12
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c183
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c179
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h139
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c917
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c239
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h67
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c74
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c29
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c75
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c178
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h31
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c251
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c75
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c33
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h2
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig24
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile15
-rw-r--r--drivers/net/wireless/rtlwifi/base.c91
-rw-r--r--drivers/net/wireless/rtlwifi/base.h39
-rw-r--r--drivers/net/wireless/rtlwifi/core.c26
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c18
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h3
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c152
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h12
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c58
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/Makefile9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c1398
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h204
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c (renamed from drivers/net/wireless/rtlwifi/rtl8192ce/fw.c)72
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h (renamed from drivers/net/wireless/rtlwifi/rtl8192ce/fw.h)0
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/main.c39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2042
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h246
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/Makefile3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c1364
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c158
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2081
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h73
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c183
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h464
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/Makefile14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/def.h62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c113
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h32
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2504
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h116
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.c142
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.h180
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c607
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.h36
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/reg.h30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c493
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h47
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c336
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h53
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.c1888
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.h71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c687
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h430
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c1035
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h164
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h680
-rw-r--r--drivers/net/wireless/wl1251/acx.c53
-rw-r--r--drivers/net/wireless/wl1251/acx.h72
-rw-r--r--drivers/net/wireless/wl1251/event.c18
-rw-r--r--drivers/net/wireless/wl1251/main.c22
-rw-r--r--drivers/net/wireless/wl1251/ps.c52
-rw-r--r--drivers/net/wireless/wl1251/rx.c51
-rw-r--r--drivers/net/wireless/wl1251/tx.c74
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h7
-rw-r--r--drivers/net/wireless/wl1251/wl12xx_80211.h3
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c277
-rw-r--r--drivers/net/wireless/wl12xx/acx.h141
-rw-r--r--drivers/net/wireless/wl12xx/boot.c38
-rw-r--r--drivers/net/wireless/wl12xx/boot.h5
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c319
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h161
-rw-r--r--drivers/net/wireless/wl12xx/conf.h125
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c51
-rw-r--r--drivers/net/wireless/wl12xx/event.c21
-rw-r--r--drivers/net/wireless/wl12xx/event.h10
-rw-r--r--drivers/net/wireless/wl12xx/init.c400
-rw-r--r--drivers/net/wireless/wl12xx/init.h2
-rw-r--r--drivers/net/wireless/wl12xx/io.h1
-rw-r--r--drivers/net/wireless/wl12xx/main.c1462
-rw-r--r--drivers/net/wireless/wl12xx/ps.c90
-rw-r--r--drivers/net/wireless/wl12xx/ps.h4
-rw-r--r--drivers/net/wireless/wl12xx/rx.c37
-rw-r--r--drivers/net/wireless/wl12xx/rx.h17
-rw-r--r--drivers/net/wireless/wl12xx/scan.c20
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c43
-rw-r--r--drivers/net/wireless/wl12xx/spi.c21
-rw-r--r--drivers/net/wireless/wl12xx/tx.c365
-rw-r--r--drivers/net/wireless/wl12xx/tx.h15
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h211
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h14
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c169
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c453
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h24
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c597
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h37
-rw-r--r--drivers/net/xen-netback/Makefile3
-rw-r--r--drivers/net/xen-netback/common.h161
-rw-r--r--drivers/net/xen-netback/interface.c424
-rw-r--r--drivers/net/xen-netback/netback.c1745
-rw-r--r--drivers/net/xen-netback/xenbus.c490
-rw-r--r--drivers/net/xen-netfront.c24
-rw-r--r--drivers/s390/net/qeth_core.h4
-rw-r--r--drivers/s390/net/qeth_core_main.c57
-rw-r--r--drivers/s390/net/qeth_l2_main.c45
-rw-r--r--drivers/s390/net/qeth_l3_main.c60
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kconfig4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig4
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c21
-rw-r--r--drivers/scsi/fcoe/fcoe.c4
-rw-r--r--drivers/ssb/main.c44
-rw-r--r--drivers/ssb/pci.c6
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c4
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wl_mac80211.c31
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_main.c5
-rw-r--r--drivers/staging/pohmelfs/config.c2
-rw-r--r--drivers/staging/winbond/wbusb.c7
-rw-r--r--drivers/video/uvesafb.c2
-rw-r--r--drivers/xen/events.c38
567 files changed, 80730 insertions, 21642 deletions
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 8cbfaa687d72..fe81c851ca88 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2177,7 +2177,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
2177 return; 2177 return;
2178 } 2178 }
2179 2179
2180 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { 2180 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
2181 retcode = ERR_PERM; 2181 retcode = ERR_PERM;
2182 goto fail; 2182 goto fail;
2183 } 2183 }
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6dcd55a74c0a..5577ed656e2f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -31,6 +31,30 @@
31 31
32#define VERSION "1.0" 32#define VERSION "1.0"
33 33
34#define ATH3K_DNLOAD 0x01
35#define ATH3K_GETSTATE 0x05
36#define ATH3K_SET_NORMAL_MODE 0x07
37#define ATH3K_GETVERSION 0x09
38#define USB_REG_SWITCH_VID_PID 0x0a
39
40#define ATH3K_MODE_MASK 0x3F
41#define ATH3K_NORMAL_MODE 0x0E
42
43#define ATH3K_PATCH_UPDATE 0x80
44#define ATH3K_SYSCFG_UPDATE 0x40
45
46#define ATH3K_XTAL_FREQ_26M 0x00
47#define ATH3K_XTAL_FREQ_40M 0x01
48#define ATH3K_XTAL_FREQ_19P2 0x02
49#define ATH3K_NAME_LEN 0xFF
50
51struct ath3k_version {
52 unsigned int rom_version;
53 unsigned int build_version;
54 unsigned int ram_version;
55 unsigned char ref_clock;
56 unsigned char reserved[0x07];
57};
34 58
35static struct usb_device_id ath3k_table[] = { 59static struct usb_device_id ath3k_table[] = {
36 /* Atheros AR3011 */ 60 /* Atheros AR3011 */
@@ -42,15 +66,31 @@ static struct usb_device_id ath3k_table[] = {
42 /* Atheros AR9285 Malbec with sflash firmware */ 66 /* Atheros AR9285 Malbec with sflash firmware */
43 { USB_DEVICE(0x03F0, 0x311D) }, 67 { USB_DEVICE(0x03F0, 0x311D) },
44 68
69 /* Atheros AR3012 with sflash firmware*/
70 { USB_DEVICE(0x0CF3, 0x3004) },
71
45 /* Atheros AR5BBU12 with sflash firmware */ 72 /* Atheros AR5BBU12 with sflash firmware */
46 { USB_DEVICE(0x0489, 0xE02C) }, 73 { USB_DEVICE(0x0489, 0xE02C) },
74
47 { } /* Terminating entry */ 75 { } /* Terminating entry */
48}; 76};
49 77
50MODULE_DEVICE_TABLE(usb, ath3k_table); 78MODULE_DEVICE_TABLE(usb, ath3k_table);
51 79
80#define BTUSB_ATH3012 0x80
81/* This table is to load patch and sysconfig files
82 * for AR3012 */
83static struct usb_device_id ath3k_blist_tbl[] = {
84
85 /* Atheros AR3012 with sflash firmware*/
86 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
87
88 { } /* Terminating entry */
89};
90
52#define USB_REQ_DFU_DNLOAD 1 91#define USB_REQ_DFU_DNLOAD 1
53#define BULK_SIZE 4096 92#define BULK_SIZE 4096
93#define FW_HDR_SIZE 20
54 94
55static int ath3k_load_firmware(struct usb_device *udev, 95static int ath3k_load_firmware(struct usb_device *udev,
56 const struct firmware *firmware) 96 const struct firmware *firmware)
@@ -106,28 +146,265 @@ error:
106 return err; 146 return err;
107} 147}
108 148
149static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
150{
151 int pipe = 0;
152
153 pipe = usb_rcvctrlpipe(udev, 0);
154 return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
155 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
156 state, 0x01, USB_CTRL_SET_TIMEOUT);
157}
158
159static int ath3k_get_version(struct usb_device *udev,
160 struct ath3k_version *version)
161{
162 int pipe = 0;
163
164 pipe = usb_rcvctrlpipe(udev, 0);
165 return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
166 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
167 sizeof(struct ath3k_version),
168 USB_CTRL_SET_TIMEOUT);
169}
170
171static int ath3k_load_fwfile(struct usb_device *udev,
172 const struct firmware *firmware)
173{
174 u8 *send_buf;
175 int err, pipe, len, size, count, sent = 0;
176 int ret;
177
178 count = firmware->size;
179
180 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
181 if (!send_buf) {
182 BT_ERR("Can't allocate memory chunk for firmware");
183 return -ENOMEM;
184 }
185
186 size = min_t(uint, count, FW_HDR_SIZE);
187 memcpy(send_buf, firmware->data, size);
188
189 pipe = usb_sndctrlpipe(udev, 0);
190 ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD,
191 USB_TYPE_VENDOR, 0, 0, send_buf,
192 size, USB_CTRL_SET_TIMEOUT);
193 if (ret < 0) {
194 BT_ERR("Can't change to loading configuration err");
195 kfree(send_buf);
196 return ret;
197 }
198
199 sent += size;
200 count -= size;
201
202 while (count) {
203 size = min_t(uint, count, BULK_SIZE);
204 pipe = usb_sndbulkpipe(udev, 0x02);
205
206 memcpy(send_buf, firmware->data + sent, size);
207
208 err = usb_bulk_msg(udev, pipe, send_buf, size,
209 &len, 3000);
210 if (err || (len != size)) {
211 BT_ERR("Error in firmware loading err = %d,"
212 "len = %d, size = %d", err, len, size);
213 kfree(send_buf);
214 return err;
215 }
216 sent += size;
217 count -= size;
218 }
219
220 kfree(send_buf);
221 return 0;
222}
223
224static int ath3k_switch_pid(struct usb_device *udev)
225{
226 int pipe = 0;
227
228 pipe = usb_sndctrlpipe(udev, 0);
229 return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID,
230 USB_TYPE_VENDOR, 0, 0,
231 NULL, 0, USB_CTRL_SET_TIMEOUT);
232}
233
234static int ath3k_set_normal_mode(struct usb_device *udev)
235{
236 unsigned char fw_state;
237 int pipe = 0, ret;
238
239 ret = ath3k_get_state(udev, &fw_state);
240 if (ret < 0) {
241 BT_ERR("Can't get state to change to normal mode err");
242 return ret;
243 }
244
245 if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) {
246 BT_DBG("firmware was already in normal mode");
247 return 0;
248 }
249
250 pipe = usb_sndctrlpipe(udev, 0);
251 return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE,
252 USB_TYPE_VENDOR, 0, 0,
253 NULL, 0, USB_CTRL_SET_TIMEOUT);
254}
255
256static int ath3k_load_patch(struct usb_device *udev)
257{
258 unsigned char fw_state;
259 char filename[ATH3K_NAME_LEN] = {0};
260 const struct firmware *firmware;
261 struct ath3k_version fw_version, pt_version;
262 int ret;
263
264 ret = ath3k_get_state(udev, &fw_state);
265 if (ret < 0) {
266 BT_ERR("Can't get state to change to load ram patch err");
267 return ret;
268 }
269
270 if (fw_state & ATH3K_PATCH_UPDATE) {
271 BT_DBG("Patch was already downloaded");
272 return 0;
273 }
274
275 ret = ath3k_get_version(udev, &fw_version);
276 if (ret < 0) {
277 BT_ERR("Can't get version to change to load ram patch err");
278 return ret;
279 }
280
281 snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
282 fw_version.rom_version);
283
284 ret = request_firmware(&firmware, filename, &udev->dev);
285 if (ret < 0) {
286 BT_ERR("Patch file not found %s", filename);
287 return ret;
288 }
289
290 pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8);
291 pt_version.build_version = *(int *)
292 (firmware->data + firmware->size - 4);
293
294 if ((pt_version.rom_version != fw_version.rom_version) ||
295 (pt_version.build_version <= fw_version.build_version)) {
296 BT_ERR("Patch file version did not match with firmware");
297 release_firmware(firmware);
298 return -EINVAL;
299 }
300
301 ret = ath3k_load_fwfile(udev, firmware);
302 release_firmware(firmware);
303
304 return ret;
305}
306
307static int ath3k_load_syscfg(struct usb_device *udev)
308{
309 unsigned char fw_state;
310 char filename[ATH3K_NAME_LEN] = {0};
311 const struct firmware *firmware;
312 struct ath3k_version fw_version;
313 int clk_value, ret;
314
315 ret = ath3k_get_state(udev, &fw_state);
316 if (ret < 0) {
317 BT_ERR("Can't get state to change to load configration err");
318 return -EBUSY;
319 }
320
321 ret = ath3k_get_version(udev, &fw_version);
322 if (ret < 0) {
323 BT_ERR("Can't get version to change to load ram patch err");
324 return ret;
325 }
326
327 switch (fw_version.ref_clock) {
328
329 case ATH3K_XTAL_FREQ_26M:
330 clk_value = 26;
331 break;
332 case ATH3K_XTAL_FREQ_40M:
333 clk_value = 40;
334 break;
335 case ATH3K_XTAL_FREQ_19P2:
336 clk_value = 19;
337 break;
338 default:
339 clk_value = 0;
340 break;
341 }
342
343 snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
344 fw_version.rom_version, clk_value, ".dfu");
345
346 ret = request_firmware(&firmware, filename, &udev->dev);
347 if (ret < 0) {
348 BT_ERR("Configuration file not found %s", filename);
349 return ret;
350 }
351
352 ret = ath3k_load_fwfile(udev, firmware);
353 release_firmware(firmware);
354
355 return ret;
356}
357
109static int ath3k_probe(struct usb_interface *intf, 358static int ath3k_probe(struct usb_interface *intf,
110 const struct usb_device_id *id) 359 const struct usb_device_id *id)
111{ 360{
112 const struct firmware *firmware; 361 const struct firmware *firmware;
113 struct usb_device *udev = interface_to_usbdev(intf); 362 struct usb_device *udev = interface_to_usbdev(intf);
363 int ret;
114 364
115 BT_DBG("intf %p id %p", intf, id); 365 BT_DBG("intf %p id %p", intf, id);
116 366
117 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 367 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
118 return -ENODEV; 368 return -ENODEV;
119 369
120 if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { 370 /* match device ID in ath3k blacklist table */
121 return -EIO; 371 if (!id->driver_info) {
372 const struct usb_device_id *match;
373 match = usb_match_id(intf, ath3k_blist_tbl);
374 if (match)
375 id = match;
122 } 376 }
123 377
124 if (ath3k_load_firmware(udev, firmware)) { 378 /* load patch and sysconfig files for AR3012 */
125 release_firmware(firmware); 379 if (id->driver_info & BTUSB_ATH3012) {
380 ret = ath3k_load_patch(udev);
381 if (ret < 0) {
382 BT_ERR("Loading patch file failed");
383 return ret;
384 }
385 ret = ath3k_load_syscfg(udev);
386 if (ret < 0) {
387 BT_ERR("Loading sysconfig file failed");
388 return ret;
389 }
390 ret = ath3k_set_normal_mode(udev);
391 if (ret < 0) {
392 BT_ERR("Set normal mode failed");
393 return ret;
394 }
395 ath3k_switch_pid(udev);
396 return 0;
397 }
398
399 if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
400 BT_ERR("Error loading firmware");
126 return -EIO; 401 return -EIO;
127 } 402 }
403
404 ret = ath3k_load_firmware(udev, firmware);
128 release_firmware(firmware); 405 release_firmware(firmware);
129 406
130 return 0; 407 return ret;
131} 408}
132 409
133static void ath3k_disconnect(struct usb_interface *intf) 410static void ath3k_disconnect(struct usb_interface *intf)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 700a3840fddc..7e0ebd4a1a74 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -105,6 +105,9 @@ static struct usb_device_id blacklist_table[] = {
105 /* Atheros AR9285 Malbec with sflash firmware */ 105 /* Atheros AR9285 Malbec with sflash firmware */
106 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, 106 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
107 107
108 /* Atheros 3012 with sflash firmware */
109 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE },
110
108 /* Atheros AR5BBU12 with sflash firmware */ 111 /* Atheros AR5BBU12 with sflash firmware */
109 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 112 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
110 113
@@ -714,15 +717,11 @@ static int btusb_send_frame(struct sk_buff *skb)
714 pipe = usb_sndisocpipe(data->udev, 717 pipe = usb_sndisocpipe(data->udev,
715 data->isoc_tx_ep->bEndpointAddress); 718 data->isoc_tx_ep->bEndpointAddress);
716 719
717 urb->dev = data->udev; 720 usb_fill_int_urb(urb, data->udev, pipe,
718 urb->pipe = pipe; 721 skb->data, skb->len, btusb_isoc_tx_complete,
719 urb->context = skb; 722 skb, data->isoc_tx_ep->bInterval);
720 urb->complete = btusb_isoc_tx_complete;
721 urb->interval = data->isoc_tx_ep->bInterval;
722 723
723 urb->transfer_flags = URB_ISO_ASAP; 724 urb->transfer_flags = URB_ISO_ASAP;
724 urb->transfer_buffer = skb->data;
725 urb->transfer_buffer_length = skb->len;
726 725
727 __fill_isoc_descriptor(urb, skb->len, 726 __fill_isoc_descriptor(urb, skb->len,
728 le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); 727 le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 3c6cabcb7d84..48ad2a7ab080 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -398,6 +398,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
398 hdev->flush = hci_uart_flush; 398 hdev->flush = hci_uart_flush;
399 hdev->send = hci_uart_send_frame; 399 hdev->send = hci_uart_send_frame;
400 hdev->destruct = hci_uart_destruct; 400 hdev->destruct = hci_uart_destruct;
401 hdev->parent = hu->tty->dev;
401 402
402 hdev->owner = THIS_MODULE; 403 hdev->owner = THIS_MODULE;
403 404
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 8aba0ba57de5..e0ef5fdc361e 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -183,20 +183,15 @@ static int addr4_resolve(struct sockaddr_in *src_in,
183{ 183{
184 __be32 src_ip = src_in->sin_addr.s_addr; 184 __be32 src_ip = src_in->sin_addr.s_addr;
185 __be32 dst_ip = dst_in->sin_addr.s_addr; 185 __be32 dst_ip = dst_in->sin_addr.s_addr;
186 struct flowi fl;
187 struct rtable *rt; 186 struct rtable *rt;
188 struct neighbour *neigh; 187 struct neighbour *neigh;
189 int ret; 188 int ret;
190 189
191 memset(&fl, 0, sizeof fl); 190 rt = ip_route_output(&init_net, dst_ip, src_ip, 0, addr->bound_dev_if);
192 fl.nl_u.ip4_u.daddr = dst_ip; 191 if (IS_ERR(rt)) {
193 fl.nl_u.ip4_u.saddr = src_ip; 192 ret = PTR_ERR(rt);
194 fl.oif = addr->bound_dev_if;
195
196 ret = ip_route_output_key(&init_net, &rt, &fl);
197 if (ret)
198 goto out; 193 goto out;
199 194 }
200 src_in->sin_family = AF_INET; 195 src_in->sin_family = AF_INET;
201 src_in->sin_addr.s_addr = rt->rt_src; 196 src_in->sin_addr.s_addr = rt->rt_src;
202 197
@@ -236,28 +231,28 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
236 struct sockaddr_in6 *dst_in, 231 struct sockaddr_in6 *dst_in,
237 struct rdma_dev_addr *addr) 232 struct rdma_dev_addr *addr)
238{ 233{
239 struct flowi fl; 234 struct flowi6 fl6;
240 struct neighbour *neigh; 235 struct neighbour *neigh;
241 struct dst_entry *dst; 236 struct dst_entry *dst;
242 int ret; 237 int ret;
243 238
244 memset(&fl, 0, sizeof fl); 239 memset(&fl6, 0, sizeof fl6);
245 ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr); 240 ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
246 ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr); 241 ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
247 fl.oif = addr->bound_dev_if; 242 fl6.flowi6_oif = addr->bound_dev_if;
248 243
249 dst = ip6_route_output(&init_net, NULL, &fl); 244 dst = ip6_route_output(&init_net, NULL, &fl6);
250 if ((ret = dst->error)) 245 if ((ret = dst->error))
251 goto put; 246 goto put;
252 247
253 if (ipv6_addr_any(&fl.fl6_src)) { 248 if (ipv6_addr_any(&fl6.saddr)) {
254 ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev, 249 ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
255 &fl.fl6_dst, 0, &fl.fl6_src); 250 &fl6.daddr, 0, &fl6.saddr);
256 if (ret) 251 if (ret)
257 goto put; 252 goto put;
258 253
259 src_in->sin6_family = AF_INET6; 254 src_in->sin6_family = AF_INET6;
260 ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src); 255 ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
261 } 256 }
262 257
263 if (dst->dev->flags & IFF_LOOPBACK) { 258 if (dst->dev->flags & IFF_LOOPBACK) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d02dcc6e5963..3216bcad7e82 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -338,23 +338,11 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
338 __be16 peer_port, u8 tos) 338 __be16 peer_port, u8 tos)
339{ 339{
340 struct rtable *rt; 340 struct rtable *rt;
341 struct flowi fl = { 341
342 .oif = 0, 342 rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip,
343 .nl_u = { 343 peer_port, local_port, IPPROTO_TCP,
344 .ip4_u = { 344 tos, 0);
345 .daddr = peer_ip, 345 if (IS_ERR(rt))
346 .saddr = local_ip,
347 .tos = tos}
348 },
349 .proto = IPPROTO_TCP,
350 .uli_u = {
351 .ports = {
352 .sport = local_port,
353 .dport = peer_port}
354 }
355 };
356
357 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
358 return NULL; 346 return NULL;
359 return rt; 347 return rt;
360} 348}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b4d9e4caf3c9..9d8dcfab2b38 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -315,23 +315,11 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
315 __be16 peer_port, u8 tos) 315 __be16 peer_port, u8 tos)
316{ 316{
317 struct rtable *rt; 317 struct rtable *rt;
318 struct flowi fl = { 318
319 .oif = 0, 319 rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip,
320 .nl_u = { 320 peer_port, local_port, IPPROTO_TCP,
321 .ip4_u = { 321 tos, 0);
322 .daddr = peer_ip, 322 if (IS_ERR(rt))
323 .saddr = local_ip,
324 .tos = tos}
325 },
326 .proto = IPPROTO_TCP,
327 .uli_u = {
328 .ports = {
329 .sport = local_port,
330 .dport = peer_port}
331 }
332 };
333
334 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
335 return NULL; 323 return NULL;
336 return rt; 324 return rt;
337} 325}
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b4ec3238ceb..3d7f3664b67b 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -153,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
153 nesdev, nesdev->netdev[0]->name); 153 nesdev, nesdev->netdev[0]->name);
154 netdev = nesdev->netdev[0]; 154 netdev = nesdev->netdev[0];
155 nesvnic = netdev_priv(netdev); 155 nesvnic = netdev_priv(netdev);
156 is_bonded = (netdev->master == event_netdev); 156 is_bonded = netif_is_bond_slave(netdev) &&
157 (netdev->master == event_netdev);
157 if ((netdev == event_netdev) || is_bonded) { 158 if ((netdev == event_netdev) || is_bonded) {
158 if (nesvnic->rdma_enabled == 0) { 159 if (nesvnic->rdma_enabled == 0) {
159 nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since" 160 nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 009ec814d517..ef3291551bc6 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1104,21 +1104,19 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
1104static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex) 1104static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex)
1105{ 1105{
1106 struct rtable *rt; 1106 struct rtable *rt;
1107 struct flowi fl;
1108 struct neighbour *neigh; 1107 struct neighbour *neigh;
1109 int rc = arpindex; 1108 int rc = arpindex;
1110 struct net_device *netdev; 1109 struct net_device *netdev;
1111 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; 1110 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
1112 1111
1113 memset(&fl, 0, sizeof fl); 1112 rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
1114 fl.nl_u.ip4_u.daddr = htonl(dst_ip); 1113 if (IS_ERR(rt)) {
1115 if (ip_route_output_key(&init_net, &rt, &fl)) {
1116 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", 1114 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
1117 __func__, dst_ip); 1115 __func__, dst_ip);
1118 return rc; 1116 return rc;
1119 } 1117 }
1120 1118
1121 if (nesvnic->netdev->master) 1119 if (netif_is_bond_slave(netdev))
1122 netdev = nesvnic->netdev->master; 1120 netdev = nesvnic->netdev->master;
1123 else 1121 else
1124 netdev = nesvnic->netdev; 1122 netdev = nesvnic->netdev;
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 049eaf12aaab..1f23e048f077 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
134{ 134{
135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); 135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
136 136
137 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) 137 if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
138 return; 138 return;
139 139
140 spin_lock(&receiving_list_lock); 140 spin_lock(&receiving_list_lock);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 03823327db25..dc280bc8eba2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
238config AX88796 238config AX88796
239 tristate "ASIX AX88796 NE2000 clone support" 239 tristate "ASIX AX88796 NE2000 clone support"
240 depends on ARM || MIPS || SUPERH 240 depends on ARM || MIPS || SUPERH
241 select CRC32 241 select PHYLIB
242 select MII 242 select MDIO_BITBANG
243 help 243 help
244 AX88796 driver, using platform bus to provide 244 AX88796 driver, using platform bus to provide
245 chip detection and resources 245 chip detection and resources
@@ -1498,7 +1498,7 @@ config FORCEDETH
1498config CS89x0 1498config CS89x0
1499 tristate "CS89x0 support" 1499 tristate "CS89x0 support"
1500 depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \ 1500 depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
1501 || ARCH_IXDP2X01 || MACH_MX31ADS) 1501 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
1502 ---help--- 1502 ---help---
1503 Support for CS89x0 chipset based Ethernet cards. If you have a 1503 Support for CS89x0 chipset based Ethernet cards. If you have a
1504 network (Ethernet) card of this type, say Y and read the 1504 network (Ethernet) card of this type, say Y and read the
@@ -1512,7 +1512,7 @@ config CS89x0
1512config CS89x0_NONISA_IRQ 1512config CS89x0_NONISA_IRQ
1513 def_bool y 1513 def_bool y
1514 depends on CS89x0 != n 1514 depends on CS89x0 != n
1515 depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS 1515 depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
1516 1516
1517config TC35815 1517config TC35815
1518 tristate "TOSHIBA TC35815 Ethernet support" 1518 tristate "TOSHIBA TC35815 Ethernet support"
@@ -1944,7 +1944,8 @@ config 68360_ENET
1944config FEC 1944config FEC
1945 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1945 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
1946 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ 1946 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
1947 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28 1947 IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
1948 default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
1948 select PHYLIB 1949 select PHYLIB
1949 help 1950 help
1950 Say Y here if you want to use the built-in 10/100 Fast ethernet 1951 Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -2007,6 +2008,15 @@ config BCM63XX_ENET
2007 This driver supports the ethernet MACs in the Broadcom 63xx 2008 This driver supports the ethernet MACs in the Broadcom 63xx
2008 MIPS chipset family (BCM63XX). 2009 MIPS chipset family (BCM63XX).
2009 2010
2011config FTMAC100
2012 tristate "Faraday FTMAC100 10/100 Ethernet support"
2013 depends on ARM
2014 select MII
2015 help
2016 This driver supports the FTMAC100 10/100 Ethernet controller
2017 from Faraday. It is used on Faraday A320, Andes AG101 and some
2018 other ARM/NDS32 SoC's.
2019
2010source "drivers/net/fs_enet/Kconfig" 2020source "drivers/net/fs_enet/Kconfig"
2011 2021
2012source "drivers/net/octeon/Kconfig" 2022source "drivers/net/octeon/Kconfig"
@@ -2098,7 +2108,9 @@ config E1000
2098 2108
2099config E1000E 2109config E1000E
2100 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" 2110 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
2111 select CRC32
2101 depends on PCI && (!SPARC32 || BROKEN) 2112 depends on PCI && (!SPARC32 || BROKEN)
2113 select CRC32
2102 ---help--- 2114 ---help---
2103 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit 2115 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
2104 ethernet family of adapters. For PCI or PCI-X e1000 adapters, 2116 ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -2235,15 +2247,6 @@ config R8169
2235 To compile this driver as a module, choose M here: the module 2247 To compile this driver as a module, choose M here: the module
2236 will be called r8169. This is recommended. 2248 will be called r8169. This is recommended.
2237 2249
2238config R8169_VLAN
2239 bool "VLAN support"
2240 depends on R8169 && VLAN_8021Q
2241 ---help---
2242 Say Y here for the r8169 driver to support the functions required
2243 by the kernel 802.1Q code.
2244
2245 If in doubt, say Y.
2246
2247config SB1250_MAC 2250config SB1250_MAC
2248 tristate "SB1250 Gigabit Ethernet support" 2251 tristate "SB1250 Gigabit Ethernet support"
2249 depends on SIBYTE_SB1xxx_SOC 2252 depends on SIBYTE_SB1xxx_SOC
@@ -2594,14 +2597,9 @@ config CHELSIO_T1_1G
2594 Enables support for Chelsio's gigabit Ethernet PCI cards. If you 2597 Enables support for Chelsio's gigabit Ethernet PCI cards. If you
2595 are using only 10G cards say 'N' here. 2598 are using only 10G cards say 'N' here.
2596 2599
2597config CHELSIO_T3_DEPENDS
2598 tristate
2599 depends on PCI && INET
2600 default y
2601
2602config CHELSIO_T3 2600config CHELSIO_T3
2603 tristate "Chelsio Communications T3 10Gb Ethernet support" 2601 tristate "Chelsio Communications T3 10Gb Ethernet support"
2604 depends on CHELSIO_T3_DEPENDS 2602 depends on PCI && INET
2605 select FW_LOADER 2603 select FW_LOADER
2606 select MDIO 2604 select MDIO
2607 help 2605 help
@@ -2619,14 +2617,9 @@ config CHELSIO_T3
2619 To compile this driver as a module, choose M here: the module 2617 To compile this driver as a module, choose M here: the module
2620 will be called cxgb3. 2618 will be called cxgb3.
2621 2619
2622config CHELSIO_T4_DEPENDS
2623 tristate
2624 depends on PCI && INET
2625 default y
2626
2627config CHELSIO_T4 2620config CHELSIO_T4
2628 tristate "Chelsio Communications T4 Ethernet support" 2621 tristate "Chelsio Communications T4 Ethernet support"
2629 depends on CHELSIO_T4_DEPENDS 2622 depends on PCI
2630 select FW_LOADER 2623 select FW_LOADER
2631 select MDIO 2624 select MDIO
2632 help 2625 help
@@ -2644,14 +2637,9 @@ config CHELSIO_T4
2644 To compile this driver as a module choose M here; the module 2637 To compile this driver as a module choose M here; the module
2645 will be called cxgb4. 2638 will be called cxgb4.
2646 2639
2647config CHELSIO_T4VF_DEPENDS
2648 tristate
2649 depends on PCI && INET
2650 default y
2651
2652config CHELSIO_T4VF 2640config CHELSIO_T4VF
2653 tristate "Chelsio Communications T4 Virtual Function Ethernet support" 2641 tristate "Chelsio Communications T4 Virtual Function Ethernet support"
2654 depends on CHELSIO_T4VF_DEPENDS 2642 depends on PCI
2655 help 2643 help
2656 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 2644 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
2657 adapters with PCI-E SR-IOV Virtual Functions. 2645 adapters with PCI-E SR-IOV Virtual Functions.
@@ -2966,12 +2954,38 @@ config XEN_NETDEV_FRONTEND
2966 select XEN_XENBUS_FRONTEND 2954 select XEN_XENBUS_FRONTEND
2967 default y 2955 default y
2968 help 2956 help
2969 The network device frontend driver allows the kernel to 2957 This driver provides support for Xen paravirtual network
2970 access network devices exported exported by a virtual 2958 devices exported by a Xen network driver domain (often
2971 machine containing a physical network device driver. The 2959 domain 0).
2972 frontend driver is intended for unprivileged guest domains; 2960
2973 if you are compiling a kernel for a Xen guest, you almost 2961 The corresponding Linux backend driver is enabled by the
2974 certainly want to enable this. 2962 CONFIG_XEN_NETDEV_BACKEND option.
2963
2964 If you are compiling a kernel for use as Xen guest, you
2965 should say Y here. To compile this driver as a module, chose
2966 M here: the module will be called xen-netfront.
2967
2968config XEN_NETDEV_BACKEND
2969 tristate "Xen backend network device"
2970 depends on XEN_BACKEND
2971 help
2972 This driver allows the kernel to act as a Xen network driver
2973 domain which exports paravirtual network devices to other
2974 Xen domains. These devices can be accessed by any operating
2975 system that implements a compatible front end.
2976
2977 The corresponding Linux frontend driver is enabled by the
2978 CONFIG_XEN_NETDEV_FRONTEND configuration option.
2979
2980 The backend driver presents a standard network device
2981 endpoint for each paravirtual network device to the driver
2982 domain network stack. These can then be bridged or routed
2983 etc in order to provide full network connectivity.
2984
2985 If you are compiling a kernel to run in a Xen network driver
2986 domain (often this is domain 0) you should say Y here. To
2987 compile this driver as a module, chose M here: the module
2988 will be called xen-netback.
2975 2989
2976config ISERIES_VETH 2990config ISERIES_VETH
2977 tristate "iSeries Virtual Ethernet driver support" 2991 tristate "iSeries Virtual Ethernet driver support"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90738d13994..01b604ad155e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
147obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o 147obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
148obj-$(CONFIG_AX88796) += ax88796.o 148obj-$(CONFIG_AX88796) += ax88796.o
149obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o 149obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
150obj-$(CONFIG_FTMAC100) += ftmac100.o
150 151
151obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o 152obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
152obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o 153obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_SLIP) += slip.o
171obj-$(CONFIG_SLHC) += slhc.o 172obj-$(CONFIG_SLHC) += slhc.o
172 173
173obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o 174obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
175obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
174 176
175obj-$(CONFIG_DUMMY) += dummy.o 177obj-$(CONFIG_DUMMY) += dummy.o
176obj-$(CONFIG_IFB) += ifb.o 178obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf672009948..23f2ab0f2fa8 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
345 */ 345 */
346static int atl1c_phy_setup_adv(struct atl1c_hw *hw) 346static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
347{ 347{
348 u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK; 348 u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
349 u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP & 349 u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
350 ~GIGA_CR_1000T_SPEED_MASK; 350 ~GIGA_CR_1000T_SPEED_MASK;
351 351
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
373 } 373 }
374 374
375 if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 || 375 if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
376 atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0) 376 atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
377 return -1; 377 return -1;
378 return 0; 378 return 0;
379} 379}
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
517 "Error Setting up Auto-Negotiation\n"); 517 "Error Setting up Auto-Negotiation\n");
518 return ret_val; 518 return ret_val;
519 } 519 }
520 mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; 520 mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
521 break; 521 break;
522 case MEDIA_TYPE_100M_FULL: 522 case MEDIA_TYPE_100M_FULL:
523 mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX; 523 mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
524 break; 524 break;
525 case MEDIA_TYPE_100M_HALF: 525 case MEDIA_TYPE_100M_HALF:
526 mii_bmcr_data |= BMCR_SPEED_100; 526 mii_bmcr_data |= BMCR_SPEED100;
527 break; 527 break;
528 case MEDIA_TYPE_10M_FULL: 528 case MEDIA_TYPE_10M_FULL:
529 mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX; 529 mii_bmcr_data |= BMCR_FULLDPLX;
530 break; 530 break;
531 case MEDIA_TYPE_10M_HALF: 531 case MEDIA_TYPE_10M_HALF:
532 mii_bmcr_data |= BMCR_SPEED_10;
533 break; 532 break;
534 default: 533 default:
535 if (netif_msg_link(adapter)) 534 if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
657 err = atl1c_phy_setup_adv(hw); 656 err = atl1c_phy_setup_adv(hw);
658 if (err) 657 if (err)
659 return err; 658 return err;
660 mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; 659 mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
661 660
662 return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); 661 return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
663} 662}
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd675979aa1..655fc6c4a8a4 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
736#define REG_DEBUG_DATA0 0x1900 736#define REG_DEBUG_DATA0 0x1900
737#define REG_DEBUG_DATA1 0x1904 737#define REG_DEBUG_DATA1 0x1904
738 738
739/* PHY Control Register */
740#define MII_BMCR 0x00
741#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
742#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
743#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
744#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
745#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */
746#define BMCR_POWER_DOWN 0x0800 /* Power down */
747#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
748#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
749#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
750#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
751#define BMCR_SPEED_MASK 0x2040
752#define BMCR_SPEED_1000 0x0040
753#define BMCR_SPEED_100 0x2000
754#define BMCR_SPEED_10 0x0000
755
756/* PHY Status Register */
757#define MII_BMSR 0x01
758#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
759#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */
760#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */
761#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
762#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
763#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
764#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
765#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
766#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
767#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
768#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
769#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
770#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
771#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
772#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
773
774#define MII_PHYSID1 0x02
775#define MII_PHYSID2 0x03
776#define L1D_MPW_PHYID1 0xD01C /* V7 */ 739#define L1D_MPW_PHYID1 0xD01C /* V7 */
777#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */ 740#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
778#define L1D_MPW_PHYID3 0xD01E /* V8 */ 741#define L1D_MPW_PHYID3 0xD01E /* V8 */
779 742
780 743
781/* Autoneg Advertisement Register */ 744/* Autoneg Advertisement Register */
782#define MII_ADVERTISE 0x04 745#define ADVERTISE_DEFAULT_CAP \
783#define ADVERTISE_SPEED_MASK 0x01E0 746 (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
784#define ADVERTISE_DEFAULT_CAP 0x0DE0
785 747
786/* 1000BASE-T Control Register */ 748/* 1000BASE-T Control Register */
787#define MII_GIGA_CR 0x09
788#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */ 749#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
789 750
790#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ 751#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 3824382faecc..7d9d5067a65c 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1102,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1102 AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data); 1102 AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
1103 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) & 1103 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
1104 DEVICE_CTRL_MAX_PAYLOAD_MASK; 1104 DEVICE_CTRL_MAX_PAYLOAD_MASK;
1105 hw->dmaw_block = min(max_pay_load, hw->dmaw_block); 1105 hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
1106 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) & 1106 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
1107 DEVICE_CTRL_MAX_RREQ_SZ_MASK; 1107 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
1108 hw->dmar_block = min(max_pay_load, hw->dmar_block); 1108 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
1109 1109
1110 txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) << 1110 txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
1111 TXQ_NUM_TPD_BURST_SHIFT; 1111 TXQ_NUM_TPD_BURST_SHIFT;
@@ -2718,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2718 goto err_reset; 2718 goto err_reset;
2719 } 2719 }
2720 2720
2721 device_init_wakeup(&pdev->dev, 1);
2722 /* reset the controller to 2721 /* reset the controller to
2723 * put the device in a known good starting state */ 2722 * put the device in a known good starting state */
2724 err = atl1c_phy_init(&adapter->hw); 2723 err = atl1c_phy_init(&adapter->hw);
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c3b948..1209297433b8 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
95 ecmd->advertising = hw->autoneg_advertised | 95 ecmd->advertising = hw->autoneg_advertised |
96 ADVERTISED_TP | ADVERTISED_Autoneg; 96 ADVERTISED_TP | ADVERTISED_Autoneg;
97 97
98 adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK; 98 adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
99 adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK; 99 adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
100 if (hw->autoneg_advertised & ADVERTISE_10_HALF) 100 if (hw->autoneg_advertised & ADVERTISE_10_HALF)
101 adv4 |= MII_AR_10T_HD_CAPS; 101 adv4 |= ADVERTISE_10HALF;
102 if (hw->autoneg_advertised & ADVERTISE_10_FULL) 102 if (hw->autoneg_advertised & ADVERTISE_10_FULL)
103 adv4 |= MII_AR_10T_FD_CAPS; 103 adv4 |= ADVERTISE_10FULL;
104 if (hw->autoneg_advertised & ADVERTISE_100_HALF) 104 if (hw->autoneg_advertised & ADVERTISE_100_HALF)
105 adv4 |= MII_AR_100TX_HD_CAPS; 105 adv4 |= ADVERTISE_100HALF;
106 if (hw->autoneg_advertised & ADVERTISE_100_FULL) 106 if (hw->autoneg_advertised & ADVERTISE_100_FULL)
107 adv4 |= MII_AR_100TX_FD_CAPS; 107 adv4 |= ADVERTISE_100FULL;
108 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) 108 if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
109 adv9 |= MII_AT001_CR_1000T_FD_CAPS; 109 adv9 |= ADVERTISE_1000FULL;
110 110
111 if (adv4 != hw->mii_autoneg_adv_reg || 111 if (adv4 != hw->mii_autoneg_adv_reg ||
112 adv9 != hw->mii_1000t_ctrl_reg) { 112 adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043def8c..923063d2e5bb 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
318 * Advertisement Register (Address 4) and the 1000 mb speed bits in 318 * Advertisement Register (Address 4) and the 1000 mb speed bits in
319 * the 1000Base-T control Register (Address 9). 319 * the 1000Base-T control Register (Address 9).
320 */ 320 */
321 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; 321 mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
322 mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; 322 mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
323 323
324 /* 324 /*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
327 */ 327 */
328 switch (hw->media_type) { 328 switch (hw->media_type) {
329 case MEDIA_TYPE_AUTO_SENSOR: 329 case MEDIA_TYPE_AUTO_SENSOR:
330 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | 330 mii_autoneg_adv_reg |= ADVERTISE_ALL;
331 MII_AR_10T_FD_CAPS | 331 hw->autoneg_advertised = ADVERTISE_ALL;
332 MII_AR_100TX_HD_CAPS |
333 MII_AR_100TX_FD_CAPS);
334 hw->autoneg_advertised = ADVERTISE_10_HALF |
335 ADVERTISE_10_FULL |
336 ADVERTISE_100_HALF |
337 ADVERTISE_100_FULL;
338 if (hw->nic_type == athr_l1e) { 332 if (hw->nic_type == athr_l1e) {
339 mii_1000t_ctrl_reg |= 333 mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
340 MII_AT001_CR_1000T_FD_CAPS;
341 hw->autoneg_advertised |= ADVERTISE_1000_FULL; 334 hw->autoneg_advertised |= ADVERTISE_1000_FULL;
342 } 335 }
343 break; 336 break;
344 337
345 case MEDIA_TYPE_100M_FULL: 338 case MEDIA_TYPE_100M_FULL:
346 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; 339 mii_autoneg_adv_reg |= ADVERTISE_100FULL;
347 hw->autoneg_advertised = ADVERTISE_100_FULL; 340 hw->autoneg_advertised = ADVERTISE_100_FULL;
348 break; 341 break;
349 342
350 case MEDIA_TYPE_100M_HALF: 343 case MEDIA_TYPE_100M_HALF:
351 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; 344 mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
352 hw->autoneg_advertised = ADVERTISE_100_HALF; 345 hw->autoneg_advertised = ADVERTISE_100_HALF;
353 break; 346 break;
354 347
355 case MEDIA_TYPE_10M_FULL: 348 case MEDIA_TYPE_10M_FULL:
356 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; 349 mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
357 hw->autoneg_advertised = ADVERTISE_10_FULL; 350 hw->autoneg_advertised = ADVERTISE_10_FULL;
358 break; 351 break;
359 352
360 default: 353 default:
361 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; 354 mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
362 hw->autoneg_advertised = ADVERTISE_10_HALF; 355 hw->autoneg_advertised = ADVERTISE_10_HALF;
363 break; 356 break;
364 } 357 }
365 358
366 /* flow control fixed to enable all */ 359 /* flow control fixed to enable all */
367 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); 360 mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
368 361
369 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; 362 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
370 hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; 363 hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
374 return ret_val; 367 return ret_val;
375 368
376 if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { 369 if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
377 ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR, 370 ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
378 mii_1000t_ctrl_reg); 371 mii_1000t_ctrl_reg);
379 if (ret_val) 372 if (ret_val)
380 return ret_val; 373 return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
397 int ret_val; 390 int ret_val;
398 u16 phy_data; 391 u16 phy_data;
399 392
400 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 393 phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
401 394
402 ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data); 395 ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
403 if (ret_val) { 396 if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
645 return err; 638 return err;
646 639
647 if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { 640 if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
648 err = atl1e_write_phy_reg(hw, MII_AT001_CR, 641 err = atl1e_write_phy_reg(hw, MII_CTRL1000,
649 hw->mii_1000t_ctrl_reg); 642 hw->mii_1000t_ctrl_reg);
650 if (err) 643 if (err)
651 return err; 644 return err;
652 } 645 }
653 646
654 err = atl1e_write_phy_reg(hw, MII_BMCR, 647 err = atl1e_write_phy_reg(hw, MII_BMCR,
655 MII_CR_RESET | MII_CR_AUTO_NEG_EN | 648 BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
656 MII_CR_RESTART_AUTO_NEG);
657 return err; 649 return err;
658} 650}
659 651
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d86cfa..74df16aef793 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
629 629
630/***************************** MII definition ***************************************/ 630/***************************** MII definition ***************************************/
631/* PHY Common Register */ 631/* PHY Common Register */
632#define MII_BMCR 0x00
633#define MII_BMSR 0x01
634#define MII_PHYSID1 0x02
635#define MII_PHYSID2 0x03
636#define MII_ADVERTISE 0x04
637#define MII_LPA 0x05
638#define MII_EXPANSION 0x06
639#define MII_AT001_CR 0x09
640#define MII_AT001_SR 0x0A
641#define MII_AT001_ESR 0x0F
642#define MII_AT001_PSCR 0x10 632#define MII_AT001_PSCR 0x10
643#define MII_AT001_PSSR 0x11 633#define MII_AT001_PSSR 0x11
644#define MII_INT_CTRL 0x12 634#define MII_INT_CTRL 0x12
645#define MII_INT_STATUS 0x13 635#define MII_INT_STATUS 0x13
646#define MII_SMARTSPEED 0x14 636#define MII_SMARTSPEED 0x14
647#define MII_RERRCOUNTER 0x15
648#define MII_SREVISION 0x16
649#define MII_RESV1 0x17
650#define MII_LBRERROR 0x18 637#define MII_LBRERROR 0x18
651#define MII_PHYADDR 0x19
652#define MII_RESV2 0x1a 638#define MII_RESV2 0x1a
653#define MII_TPISTATUS 0x1b
654#define MII_NCONFIG 0x1c
655 639
656#define MII_DBG_ADDR 0x1D 640#define MII_DBG_ADDR 0x1D
657#define MII_DBG_DATA 0x1E 641#define MII_DBG_DATA 0x1E
658 642
659
660/* PHY Control Register */
661#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
662#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
663#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
664#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
665#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
666#define MII_CR_POWER_DOWN 0x0800 /* Power down */
667#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
668#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
669#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
670#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
671#define MII_CR_SPEED_MASK 0x2040
672#define MII_CR_SPEED_1000 0x0040
673#define MII_CR_SPEED_100 0x2000
674#define MII_CR_SPEED_10 0x0000
675
676
677/* PHY Status Register */
678#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
679#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
680#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
681#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
682#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
683#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
684#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
685#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
686#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
687#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
688#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
689#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
690#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
691#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
692#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
693
694/* Link partner ability register. */
695#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
696#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
697#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
698#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
699#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
700#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
701#define MII_LPA_PAUSE 0x0400 /* PAUSE */
702#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
703#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
704#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
705#define MII_LPA_NPAGE 0x8000 /* Next page bit */
706
707/* Autoneg Advertisement Register */ 643/* Autoneg Advertisement Register */
708#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ 644#define MII_AR_DEFAULT_CAP_MASK 0
709#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
710#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
711#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
712#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
713#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
714#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
715#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
716#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
717#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
718#define MII_AR_SPEED_MASK 0x01E0
719#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
720 645
721/* 1000BASE-T Control Register */ 646/* 1000BASE-T Control Register */
722#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ 647#define MII_AT001_CR_1000T_SPEED_MASK \
723#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ 648 (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
724#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ 649#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK
725/* 0=DTE device */
726#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
727/* 0=Configure PHY as Slave */
728#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
729/* 0=Automatic Master/Slave config */
730#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
731#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
732#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
733#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
734#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
735#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
736#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
737
738/* 1000BASE-T Status Register */
739#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
740#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
741#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
742#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
743#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
744#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
745#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
746#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
747
748/* Extended Status Register */
749#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
750#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
751#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
752#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
753 650
754/* AT001 PHY Specific Control Register */ 651/* AT001 PHY Specific Control Register */
755#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ 652#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index e28f8baf394e..1ff001a8270c 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
547 hw->device_id = pdev->device; 547 hw->device_id = pdev->device;
548 hw->subsystem_vendor_id = pdev->subsystem_vendor; 548 hw->subsystem_vendor_id = pdev->subsystem_vendor;
549 hw->subsystem_id = pdev->subsystem_device; 549 hw->subsystem_id = pdev->subsystem_device;
550 hw->revision_id = pdev->revision;
550 551
551 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
552 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 552 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
553 553
554 phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); 554 phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
@@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
932 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & 932 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
933 DEVICE_CTRL_MAX_PAYLOAD_MASK; 933 DEVICE_CTRL_MAX_PAYLOAD_MASK;
934 934
935 hw->dmaw_block = min(max_pay_load, hw->dmaw_block); 935 hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
936 936
937 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & 937 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
938 DEVICE_CTRL_MAX_RREQ_SZ_MASK; 938 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
939 hw->dmar_block = min(max_pay_load, hw->dmar_block); 939 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
940 940
941 if (hw->nic_type != athr_l2e_revB) 941 if (hw->nic_type != athr_l2e_revB)
942 AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, 942 AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2051 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2051 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
2052 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2052 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
2053 2053
2054 mii_advertise_data = MII_AR_10T_HD_CAPS; 2054 mii_advertise_data = ADVERTISE_10HALF;
2055 2055
2056 if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) || 2056 if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
2057 (atl1e_write_phy_reg(hw, 2057 (atl1e_write_phy_reg(hw,
2058 MII_ADVERTISE, mii_advertise_data) != 0) || 2058 MII_ADVERTISE, mii_advertise_data) != 0) ||
2059 (atl1e_phy_commit(hw)) != 0) { 2059 (atl1e_phy_commit(hw)) != 0) {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3b527687c28f..67f40b9c16ed 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
950 hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 950 hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
951 951
952 adapter->wol = 0; 952 adapter->wol = 0;
953 device_set_wakeup_enable(&adapter->pdev->dev, false);
953 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; 954 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
954 adapter->ict = 50000; /* 100ms */ 955 adapter->ict = 50000; /* 100ms */
955 adapter->link_speed = SPEED_0; /* hardware init */ 956 adapter->link_speed = SPEED_0; /* hardware init */
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
2735} 2736}
2736 2737
2737#ifdef CONFIG_PM 2738#ifdef CONFIG_PM
2738static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) 2739static int atl1_suspend(struct device *dev)
2739{ 2740{
2741 struct pci_dev *pdev = to_pci_dev(dev);
2740 struct net_device *netdev = pci_get_drvdata(pdev); 2742 struct net_device *netdev = pci_get_drvdata(pdev);
2741 struct atl1_adapter *adapter = netdev_priv(netdev); 2743 struct atl1_adapter *adapter = netdev_priv(netdev);
2742 struct atl1_hw *hw = &adapter->hw; 2744 struct atl1_hw *hw = &adapter->hw;
2743 u32 ctrl = 0; 2745 u32 ctrl = 0;
2744 u32 wufc = adapter->wol; 2746 u32 wufc = adapter->wol;
2745 u32 val; 2747 u32 val;
2746 int retval;
2747 u16 speed; 2748 u16 speed;
2748 u16 duplex; 2749 u16 duplex;
2749 2750
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2751 if (netif_running(netdev)) 2752 if (netif_running(netdev))
2752 atl1_down(adapter); 2753 atl1_down(adapter);
2753 2754
2754 retval = pci_save_state(pdev);
2755 if (retval)
2756 return retval;
2757
2758 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2755 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2759 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2756 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2760 val = ctrl & BMSR_LSTATUS; 2757 val = ctrl & BMSR_LSTATUS;
2761 if (val) 2758 if (val)
2762 wufc &= ~ATLX_WUFC_LNKC; 2759 wufc &= ~ATLX_WUFC_LNKC;
2760 if (!wufc)
2761 goto disable_wol;
2763 2762
2764 if (val && wufc) { 2763 if (val) {
2765 val = atl1_get_speed_and_duplex(hw, &speed, &duplex); 2764 val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
2766 if (val) { 2765 if (val) {
2767 if (netif_msg_ifdown(adapter)) 2766 if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2798 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; 2797 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2799 iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); 2798 iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
2800 ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2799 ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
2801 2800 } else {
2802 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
2803 goto exit;
2804 }
2805
2806 if (!val && wufc) {
2807 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); 2801 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
2808 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); 2802 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
2809 ioread32(hw->hw_addr + REG_WOL_CTRL); 2803 ioread32(hw->hw_addr + REG_WOL_CTRL);
2810 iowrite32(0, hw->hw_addr + REG_MAC_CTRL); 2804 iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
2811 ioread32(hw->hw_addr + REG_MAC_CTRL); 2805 ioread32(hw->hw_addr + REG_MAC_CTRL);
2812 hw->phy_configured = false; 2806 hw->phy_configured = false;
2813 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
2814 goto exit;
2815 } 2807 }
2816 2808
2817disable_wol: 2809 return 0;
2810
2811 disable_wol:
2818 iowrite32(0, hw->hw_addr + REG_WOL_CTRL); 2812 iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
2819 ioread32(hw->hw_addr + REG_WOL_CTRL); 2813 ioread32(hw->hw_addr + REG_WOL_CTRL);
2820 ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2814 ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
2822 iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); 2816 iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
2823 ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2817 ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
2824 hw->phy_configured = false; 2818 hw->phy_configured = false;
2825 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2826exit:
2827 if (netif_running(netdev))
2828 pci_disable_msi(adapter->pdev);
2829 pci_disable_device(pdev);
2830 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2831 2819
2832 return 0; 2820 return 0;
2833} 2821}
2834 2822
2835static int atl1_resume(struct pci_dev *pdev) 2823static int atl1_resume(struct device *dev)
2836{ 2824{
2825 struct pci_dev *pdev = to_pci_dev(dev);
2837 struct net_device *netdev = pci_get_drvdata(pdev); 2826 struct net_device *netdev = pci_get_drvdata(pdev);
2838 struct atl1_adapter *adapter = netdev_priv(netdev); 2827 struct atl1_adapter *adapter = netdev_priv(netdev);
2839 u32 err;
2840 2828
2841 pci_set_power_state(pdev, PCI_D0);
2842 pci_restore_state(pdev);
2843
2844 err = pci_enable_device(pdev);
2845 if (err) {
2846 if (netif_msg_ifup(adapter))
2847 dev_printk(KERN_DEBUG, &pdev->dev,
2848 "error enabling pci device\n");
2849 return err;
2850 }
2851
2852 pci_set_master(pdev);
2853 iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); 2829 iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
2854 pci_enable_wake(pdev, PCI_D3hot, 0);
2855 pci_enable_wake(pdev, PCI_D3cold, 0);
2856 2830
2857 atl1_reset_hw(&adapter->hw); 2831 atl1_reset_hw(&adapter->hw);
2858 2832
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
2864 2838
2865 return 0; 2839 return 0;
2866} 2840}
2841
2842static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
2843#define ATL1_PM_OPS (&atl1_pm_ops)
2844
2867#else 2845#else
2868#define atl1_suspend NULL 2846
2869#define atl1_resume NULL 2847static int atl1_suspend(struct device *dev) { return 0; }
2848
2849#define ATL1_PM_OPS NULL
2870#endif 2850#endif
2871 2851
2872static void atl1_shutdown(struct pci_dev *pdev) 2852static void atl1_shutdown(struct pci_dev *pdev)
2873{ 2853{
2874#ifdef CONFIG_PM 2854 struct net_device *netdev = pci_get_drvdata(pdev);
2875 atl1_suspend(pdev, PMSG_SUSPEND); 2855 struct atl1_adapter *adapter = netdev_priv(netdev);
2876#endif 2856
2857 atl1_suspend(&pdev->dev);
2858 pci_wake_from_d3(pdev, adapter->wol);
2859 pci_set_power_state(pdev, PCI_D3hot);
2877} 2860}
2878 2861
2879#ifdef CONFIG_NET_POLL_CONTROLLER 2862#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
3117 .id_table = atl1_pci_tbl, 3100 .id_table = atl1_pci_tbl,
3118 .probe = atl1_probe, 3101 .probe = atl1_probe,
3119 .remove = __devexit_p(atl1_remove), 3102 .remove = __devexit_p(atl1_remove),
3120 .suspend = atl1_suspend, 3103 .shutdown = atl1_shutdown,
3121 .resume = atl1_resume, 3104 .driver.pm = ATL1_PM_OPS,
3122 .shutdown = atl1_shutdown
3123}; 3105};
3124 3106
3125/* 3107/*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
3409 adapter->wol = 0; 3391 adapter->wol = 0;
3410 if (wol->wolopts & WAKE_MAGIC) 3392 if (wol->wolopts & WAKE_MAGIC)
3411 adapter->wol |= ATLX_WUFC_MAG; 3393 adapter->wol |= ATLX_WUFC_MAG;
3394
3395 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
3396
3412 return 0; 3397 return 0;
3413} 3398}
3414 3399
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 4e6f4e95a5a0..e637e9f28fd4 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
93 hw->device_id = pdev->device; 93 hw->device_id = pdev->device;
94 hw->subsystem_vendor_id = pdev->subsystem_vendor; 94 hw->subsystem_vendor_id = pdev->subsystem_vendor;
95 hw->subsystem_id = pdev->subsystem_device; 95 hw->subsystem_id = pdev->subsystem_device;
96 hw->revision_id = pdev->revision;
96 97
97 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
98 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 98 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
99 99
100 adapter->wol = 0; 100 adapter->wol = 0;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4bebff3faeab..e7cb8c8b9776 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -9,7 +9,7 @@
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12*/ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -17,46 +17,45 @@
17#include <linux/isapnp.h> 17#include <linux/isapnp.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/io.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
24#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
25#include <linux/ethtool.h> 26#include <linux/ethtool.h>
26#include <linux/mii.h> 27#include <linux/mdio-bitbang.h>
28#include <linux/phy.h>
27#include <linux/eeprom_93cx6.h> 29#include <linux/eeprom_93cx6.h>
28#include <linux/slab.h> 30#include <linux/slab.h>
29 31
30#include <net/ax88796.h> 32#include <net/ax88796.h>
31 33
32#include <asm/system.h> 34#include <asm/system.h>
33#include <asm/io.h>
34
35static int phy_debug = 0;
36 35
37/* Rename the lib8390.c functions to show that they are in this driver */ 36/* Rename the lib8390.c functions to show that they are in this driver */
38#define __ei_open ax_ei_open 37#define __ei_open ax_ei_open
39#define __ei_close ax_ei_close 38#define __ei_close ax_ei_close
40#define __ei_poll ax_ei_poll 39#define __ei_poll ax_ei_poll
41#define __ei_start_xmit ax_ei_start_xmit 40#define __ei_start_xmit ax_ei_start_xmit
42#define __ei_tx_timeout ax_ei_tx_timeout 41#define __ei_tx_timeout ax_ei_tx_timeout
43#define __ei_get_stats ax_ei_get_stats 42#define __ei_get_stats ax_ei_get_stats
44#define __ei_set_multicast_list ax_ei_set_multicast_list 43#define __ei_set_multicast_list ax_ei_set_multicast_list
45#define __ei_interrupt ax_ei_interrupt 44#define __ei_interrupt ax_ei_interrupt
46#define ____alloc_ei_netdev ax__alloc_ei_netdev 45#define ____alloc_ei_netdev ax__alloc_ei_netdev
47#define __NS8390_init ax_NS8390_init 46#define __NS8390_init ax_NS8390_init
48 47
49/* force unsigned long back to 'void __iomem *' */ 48/* force unsigned long back to 'void __iomem *' */
50#define ax_convert_addr(_a) ((void __force __iomem *)(_a)) 49#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
51 50
52#define ei_inb(_a) readb(ax_convert_addr(_a)) 51#define ei_inb(_a) readb(ax_convert_addr(_a))
53#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a)) 52#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
54 53
55#define ei_inb_p(_a) ei_inb(_a) 54#define ei_inb_p(_a) ei_inb(_a)
56#define ei_outb_p(_v, _a) ei_outb(_v, _a) 55#define ei_outb_p(_v, _a) ei_outb(_v, _a)
57 56
58/* define EI_SHIFT() to take into account our register offsets */ 57/* define EI_SHIFT() to take into account our register offsets */
59#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) 58#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
60 59
61/* Ensure we have our RCR base value */ 60/* Ensure we have our RCR base value */
62#define AX88796_PLATFORM 61#define AX88796_PLATFORM
@@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
74#define NE_DATAPORT EI_SHIFT(0x10) 73#define NE_DATAPORT EI_SHIFT(0x10)
75 74
76#define NE1SM_START_PG 0x20 /* First page of TX buffer */ 75#define NE1SM_START_PG 0x20 /* First page of TX buffer */
77#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ 76#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
78#define NESM_START_PG 0x40 /* First page of TX buffer */ 77#define NESM_START_PG 0x40 /* First page of TX buffer */
79#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ 78#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
80 79
80#define AX_GPOC_PPDSET BIT(6)
81
81/* device private data */ 82/* device private data */
82 83
83struct ax_device { 84struct ax_device {
84 struct timer_list mii_timer; 85 struct mii_bus *mii_bus;
85 spinlock_t mii_lock; 86 struct mdiobb_ctrl bb_ctrl;
86 struct mii_if_info mii; 87 struct phy_device *phy_dev;
87 88 void __iomem *addr_memr;
88 u32 msg_enable; 89 u8 reg_memr;
89 void __iomem *map2; 90 int link;
90 struct platform_device *dev; 91 int speed;
91 struct resource *mem; 92 int duplex;
92 struct resource *mem2; 93
93 struct ax_plat_data *plat; 94 void __iomem *map2;
94 95 const struct ax_plat_data *plat;
95 unsigned char running; 96
96 unsigned char resume_open; 97 unsigned char running;
97 unsigned int irqflags; 98 unsigned char resume_open;
98 99 unsigned int irqflags;
99 u32 reg_offsets[0x20]; 100
101 u32 reg_offsets[0x20];
100}; 102};
101 103
102static inline struct ax_device *to_ax_dev(struct net_device *dev) 104static inline struct ax_device *to_ax_dev(struct net_device *dev)
103{ 105{
104 struct ei_device *ei_local = netdev_priv(dev); 106 struct ei_device *ei_local = netdev_priv(dev);
105 return (struct ax_device *)(ei_local+1); 107 return (struct ax_device *)(ei_local + 1);
106} 108}
107 109
108/* ax_initial_check 110/*
111 * ax_initial_check
109 * 112 *
110 * do an initial probe for the card to check wether it exists 113 * do an initial probe for the card to check wether it exists
111 * and is functional 114 * and is functional
112 */ 115 */
113
114static int ax_initial_check(struct net_device *dev) 116static int ax_initial_check(struct net_device *dev)
115{ 117{
116 struct ei_device *ei_local = netdev_priv(dev); 118 struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev)
122 if (reg0 == 0xFF) 124 if (reg0 == 0xFF)
123 return -ENODEV; 125 return -ENODEV;
124 126
125 ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); 127 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
126 regd = ei_inb(ioaddr + 0x0d); 128 regd = ei_inb(ioaddr + 0x0d);
127 ei_outb(0xff, ioaddr + 0x0d); 129 ei_outb(0xff, ioaddr + 0x0d);
128 ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); 130 ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
129 ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ 131 ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
130 if (ei_inb(ioaddr + EN0_COUNTER0) != 0) { 132 if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
131 ei_outb(reg0, ioaddr); 133 ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev)
136 return 0; 138 return 0;
137} 139}
138 140
139/* Hard reset the card. This used to pause for the same period that a 141/*
140 8390 reset command required, but that shouldn't be necessary. */ 142 * Hard reset the card. This used to pause for the same period that a
141 143 * 8390 reset command required, but that shouldn't be necessary.
144 */
142static void ax_reset_8390(struct net_device *dev) 145static void ax_reset_8390(struct net_device *dev)
143{ 146{
144 struct ei_device *ei_local = netdev_priv(dev); 147 struct ei_device *ei_local = netdev_priv(dev);
145 struct ax_device *ax = to_ax_dev(dev);
146 unsigned long reset_start_time = jiffies; 148 unsigned long reset_start_time = jiffies;
147 void __iomem *addr = (void __iomem *)dev->base_addr; 149 void __iomem *addr = (void __iomem *)dev->base_addr;
148 150
149 if (ei_debug > 1) 151 if (ei_debug > 1)
150 dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies); 152 netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
151 153
152 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); 154 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
153 155
154 ei_status.txing = 0; 156 ei_local->txing = 0;
155 ei_status.dmaing = 0; 157 ei_local->dmaing = 0;
156 158
157 /* This check _should_not_ be necessary, omit eventually. */ 159 /* This check _should_not_ be necessary, omit eventually. */
158 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 160 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
159 if (jiffies - reset_start_time > 2*HZ/100) { 161 if (jiffies - reset_start_time > 2 * HZ / 100) {
160 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", 162 netdev_warn(dev, "%s: did not complete.\n", __func__);
161 __func__, dev->name);
162 break; 163 break;
163 } 164 }
164 } 165 }
@@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
171 int ring_page) 172 int ring_page)
172{ 173{
173 struct ei_device *ei_local = netdev_priv(dev); 174 struct ei_device *ei_local = netdev_priv(dev);
174 struct ax_device *ax = to_ax_dev(dev);
175 void __iomem *nic_base = ei_local->mem; 175 void __iomem *nic_base = ei_local->mem;
176 176
177 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 177 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
178 if (ei_status.dmaing) { 178 if (ei_local->dmaing) {
179 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " 179 netdev_err(dev, "DMAing conflict in %s "
180 "[DMAstat:%d][irqlock:%d].\n", 180 "[DMAstat:%d][irqlock:%d].\n",
181 dev->name, __func__, 181 __func__,
182 ei_status.dmaing, ei_status.irqlock); 182 ei_local->dmaing, ei_local->irqlock);
183 return; 183 return;
184 } 184 }
185 185
186 ei_status.dmaing |= 0x01; 186 ei_local->dmaing |= 0x01;
187 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); 187 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
188 ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); 188 ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
189 ei_outb(0, nic_base + EN0_RCNTHI); 189 ei_outb(0, nic_base + EN0_RCNTHI);
190 ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ 190 ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */
191 ei_outb(ring_page, nic_base + EN0_RSARHI); 191 ei_outb(ring_page, nic_base + EN0_RSARHI);
192 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 192 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
193 193
194 if (ei_status.word16) 194 if (ei_local->word16)
195 readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); 195 readsw(nic_base + NE_DATAPORT, hdr,
196 sizeof(struct e8390_pkt_hdr) >> 1);
196 else 197 else
197 readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); 198 readsb(nic_base + NE_DATAPORT, hdr,
199 sizeof(struct e8390_pkt_hdr));
198 200
199 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 201 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
200 ei_status.dmaing &= ~0x01; 202 ei_local->dmaing &= ~0x01;
201 203
202 le16_to_cpus(&hdr->count); 204 le16_to_cpus(&hdr->count);
203} 205}
204 206
205 207
206/* Block input and output, similar to the Crynwr packet driver. If you 208/*
207 are porting to a new ethercard, look at the packet driver source for hints. 209 * Block input and output, similar to the Crynwr packet driver. If
208 The NEx000 doesn't share the on-board packet memory -- you have to put 210 * you are porting to a new ethercard, look at the packet driver
209 the packet out through the "remote DMA" dataport using ei_outb. */ 211 * source for hints. The NEx000 doesn't share the on-board packet
210 212 * memory -- you have to put the packet out through the "remote DMA"
213 * dataport using ei_outb.
214 */
211static void ax_block_input(struct net_device *dev, int count, 215static void ax_block_input(struct net_device *dev, int count,
212 struct sk_buff *skb, int ring_offset) 216 struct sk_buff *skb, int ring_offset)
213{ 217{
214 struct ei_device *ei_local = netdev_priv(dev); 218 struct ei_device *ei_local = netdev_priv(dev);
215 struct ax_device *ax = to_ax_dev(dev);
216 void __iomem *nic_base = ei_local->mem; 219 void __iomem *nic_base = ei_local->mem;
217 char *buf = skb->data; 220 char *buf = skb->data;
218 221
219 if (ei_status.dmaing) { 222 if (ei_local->dmaing) {
220 dev_err(&ax->dev->dev, 223 netdev_err(dev,
221 "%s: DMAing conflict in %s " 224 "DMAing conflict in %s "
222 "[DMAstat:%d][irqlock:%d].\n", 225 "[DMAstat:%d][irqlock:%d].\n",
223 dev->name, __func__, 226 __func__,
224 ei_status.dmaing, ei_status.irqlock); 227 ei_local->dmaing, ei_local->irqlock);
225 return; 228 return;
226 } 229 }
227 230
228 ei_status.dmaing |= 0x01; 231 ei_local->dmaing |= 0x01;
229 232
230 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); 233 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
231 ei_outb(count & 0xff, nic_base + EN0_RCNTLO); 234 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
232 ei_outb(count >> 8, nic_base + EN0_RCNTHI); 235 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
233 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); 236 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
234 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); 237 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
235 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 238 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
236 239
237 if (ei_status.word16) { 240 if (ei_local->word16) {
238 readsw(nic_base + NE_DATAPORT, buf, count >> 1); 241 readsw(nic_base + NE_DATAPORT, buf, count >> 1);
239 if (count & 0x01) 242 if (count & 0x01)
240 buf[count-1] = ei_inb(nic_base + NE_DATAPORT); 243 buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count,
243 readsb(nic_base + NE_DATAPORT, buf, count); 246 readsb(nic_base + NE_DATAPORT, buf, count);
244 } 247 }
245 248
246 ei_status.dmaing &= ~1; 249 ei_local->dmaing &= ~1;
247} 250}
248 251
249static void ax_block_output(struct net_device *dev, int count, 252static void ax_block_output(struct net_device *dev, int count,
250 const unsigned char *buf, const int start_page) 253 const unsigned char *buf, const int start_page)
251{ 254{
252 struct ei_device *ei_local = netdev_priv(dev); 255 struct ei_device *ei_local = netdev_priv(dev);
253 struct ax_device *ax = to_ax_dev(dev);
254 void __iomem *nic_base = ei_local->mem; 256 void __iomem *nic_base = ei_local->mem;
255 unsigned long dma_start; 257 unsigned long dma_start;
256 258
257 /* Round the count up for word writes. Do we need to do this? 259 /*
258 What effect will an odd byte count have on the 8390? 260 * Round the count up for word writes. Do we need to do this?
259 I should check someday. */ 261 * What effect will an odd byte count have on the 8390? I
260 262 * should check someday.
261 if (ei_status.word16 && (count & 0x01)) 263 */
264 if (ei_local->word16 && (count & 0x01))
262 count++; 265 count++;
263 266
264 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 267 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
265 if (ei_status.dmaing) { 268 if (ei_local->dmaing) {
266 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." 269 netdev_err(dev, "DMAing conflict in %s."
267 "[DMAstat:%d][irqlock:%d]\n", 270 "[DMAstat:%d][irqlock:%d]\n",
268 dev->name, __func__, 271 __func__,
269 ei_status.dmaing, ei_status.irqlock); 272 ei_local->dmaing, ei_local->irqlock);
270 return; 273 return;
271 } 274 }
272 275
273 ei_status.dmaing |= 0x01; 276 ei_local->dmaing |= 0x01;
274 /* We should already be in page 0, but to be safe... */ 277 /* We should already be in page 0, but to be safe... */
275 ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); 278 ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
276 279
@@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count,
278 281
279 /* Now the normal output. */ 282 /* Now the normal output. */
280 ei_outb(count & 0xff, nic_base + EN0_RCNTLO); 283 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
281 ei_outb(count >> 8, nic_base + EN0_RCNTHI); 284 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
282 ei_outb(0x00, nic_base + EN0_RSARLO); 285 ei_outb(0x00, nic_base + EN0_RSARLO);
283 ei_outb(start_page, nic_base + EN0_RSARHI); 286 ei_outb(start_page, nic_base + EN0_RSARHI);
284 287
285 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); 288 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
286 if (ei_status.word16) { 289 if (ei_local->word16)
287 writesw(nic_base + NE_DATAPORT, buf, count>>1); 290 writesw(nic_base + NE_DATAPORT, buf, count >> 1);
288 } else { 291 else
289 writesb(nic_base + NE_DATAPORT, buf, count); 292 writesb(nic_base + NE_DATAPORT, buf, count);
290 }
291 293
292 dma_start = jiffies; 294 dma_start = jiffies;
293 295
294 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { 296 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
295 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 297 if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
296 dev_warn(&ax->dev->dev, 298 netdev_warn(dev, "timeout waiting for Tx RDC.\n");
297 "%s: timeout waiting for Tx RDC.\n", dev->name);
298 ax_reset_8390(dev); 299 ax_reset_8390(dev);
299 ax_NS8390_init(dev,1); 300 ax_NS8390_init(dev, 1);
300 break; 301 break;
301 } 302 }
302 } 303 }
303 304
304 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 305 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
305 ei_status.dmaing &= ~0x01; 306 ei_local->dmaing &= ~0x01;
306} 307}
307 308
308/* definitions for accessing MII/EEPROM interface */ 309/* definitions for accessing MII/EEPROM interface */
309 310
310#define AX_MEMR EI_SHIFT(0x14) 311#define AX_MEMR EI_SHIFT(0x14)
311#define AX_MEMR_MDC (1<<0) 312#define AX_MEMR_MDC BIT(0)
312#define AX_MEMR_MDIR (1<<1) 313#define AX_MEMR_MDIR BIT(1)
313#define AX_MEMR_MDI (1<<2) 314#define AX_MEMR_MDI BIT(2)
314#define AX_MEMR_MDO (1<<3) 315#define AX_MEMR_MDO BIT(3)
315#define AX_MEMR_EECS (1<<4) 316#define AX_MEMR_EECS BIT(4)
316#define AX_MEMR_EEI (1<<5) 317#define AX_MEMR_EEI BIT(5)
317#define AX_MEMR_EEO (1<<6) 318#define AX_MEMR_EEO BIT(6)
318#define AX_MEMR_EECLK (1<<7) 319#define AX_MEMR_EECLK BIT(7)
319 320
320/* ax_mii_ei_outbits 321static void ax_handle_link_change(struct net_device *dev)
321 *
322 * write the specified set of bits to the phy
323*/
324
325static void
326ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
327{ 322{
328 struct ei_device *ei_local = netdev_priv(dev); 323 struct ax_device *ax = to_ax_dev(dev);
329 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; 324 struct phy_device *phy_dev = ax->phy_dev;
330 unsigned int memr; 325 int status_change = 0;
331
332 /* clock low, data to output mode */
333 memr = ei_inb(memr_addr);
334 memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
335 ei_outb(memr, memr_addr);
336
337 for (len--; len >= 0; len--) {
338 if (bits & (1 << len))
339 memr |= AX_MEMR_MDO;
340 else
341 memr &= ~AX_MEMR_MDO;
342
343 ei_outb(memr, memr_addr);
344
345 /* clock high */
346 326
347 ei_outb(memr | AX_MEMR_MDC, memr_addr); 327 if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
348 udelay(1); 328 (ax->duplex != phy_dev->duplex))) {
349 329
350 /* clock low */ 330 ax->speed = phy_dev->speed;
351 ei_outb(memr, memr_addr); 331 ax->duplex = phy_dev->duplex;
332 status_change = 1;
352 } 333 }
353 334
354 /* leaves the clock line low, mdir input */ 335 if (phy_dev->link != ax->link) {
355 memr |= AX_MEMR_MDIR; 336 if (!phy_dev->link) {
356 ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR); 337 ax->speed = 0;
357} 338 ax->duplex = -1;
358 339 }
359/* ax_phy_ei_inbits 340 ax->link = phy_dev->link;
360 *
361 * read a specified number of bits from the phy
362*/
363
364static unsigned int
365ax_phy_ei_inbits(struct net_device *dev, int no)
366{
367 struct ei_device *ei_local = netdev_priv(dev);
368 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
369 unsigned int memr;
370 unsigned int result = 0;
371
372 /* clock low, data to input mode */
373 memr = ei_inb(memr_addr);
374 memr &= ~AX_MEMR_MDC;
375 memr |= AX_MEMR_MDIR;
376 ei_outb(memr, memr_addr);
377
378 for (no--; no >= 0; no--) {
379 ei_outb(memr | AX_MEMR_MDC, memr_addr);
380
381 udelay(1);
382
383 if (ei_inb(memr_addr) & AX_MEMR_MDI)
384 result |= (1<<no);
385 341
386 ei_outb(memr, memr_addr); 342 status_change = 1;
387 } 343 }
388 344
389 return result; 345 if (status_change)
390} 346 phy_print_status(phy_dev);
391
392/* ax_phy_issueaddr
393 *
394 * use the low level bit shifting routines to send the address
395 * and command to the specified phy
396*/
397
398static void
399ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
400{
401 if (phy_debug)
402 pr_debug("%s: dev %p, %04x, %04x, %d\n",
403 __func__, dev, phy_addr, reg, opc);
404
405 ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */
406 ax_mii_ei_outbits(dev, 1, 2); /* frame-start */
407 ax_mii_ei_outbits(dev, opc, 2); /* op code */
408 ax_mii_ei_outbits(dev, phy_addr, 5); /* phy address */
409 ax_mii_ei_outbits(dev, reg, 5); /* reg address */
410} 347}
411 348
412static int 349static int ax_mii_probe(struct net_device *dev)
413ax_phy_read(struct net_device *dev, int phy_addr, int reg)
414{ 350{
415 struct ei_device *ei_local = netdev_priv(dev); 351 struct ax_device *ax = to_ax_dev(dev);
416 unsigned long flags; 352 struct phy_device *phy_dev = NULL;
417 unsigned int result; 353 int ret;
418 354
419 spin_lock_irqsave(&ei_local->page_lock, flags); 355 /* find the first phy */
356 phy_dev = phy_find_first(ax->mii_bus);
357 if (!phy_dev) {
358 netdev_err(dev, "no PHY found\n");
359 return -ENODEV;
360 }
420 361
421 ax_phy_issueaddr(dev, phy_addr, reg, 2); 362 ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
363 PHY_INTERFACE_MODE_MII);
364 if (ret) {
365 netdev_err(dev, "Could not attach to PHY\n");
366 return ret;
367 }
422 368
423 result = ax_phy_ei_inbits(dev, 17); 369 /* mask with MAC supported features */
424 result &= ~(3<<16); 370 phy_dev->supported &= PHY_BASIC_FEATURES;
371 phy_dev->advertising = phy_dev->supported;
425 372
426 spin_unlock_irqrestore(&ei_local->page_lock, flags); 373 ax->phy_dev = phy_dev;
427 374
428 if (phy_debug) 375 netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
429 pr_debug("%s: %04x.%04x => read %04x\n", __func__, 376 phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
430 phy_addr, reg, result);
431 377
432 return result; 378 return 0;
433} 379}
434 380
435static void 381static void ax_phy_switch(struct net_device *dev, int on)
436ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
437{ 382{
438 struct ei_device *ei = netdev_priv(dev); 383 struct ei_device *ei_local = netdev_priv(dev);
439 struct ax_device *ax = to_ax_dev(dev); 384 struct ax_device *ax = to_ax_dev(dev);
440 unsigned long flags;
441
442 dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
443 __func__, dev, phy_addr, reg, value);
444
445 spin_lock_irqsave(&ei->page_lock, flags);
446
447 ax_phy_issueaddr(dev, phy_addr, reg, 1);
448 ax_mii_ei_outbits(dev, 2, 2); /* send TA */
449 ax_mii_ei_outbits(dev, value, 16);
450
451 spin_unlock_irqrestore(&ei->page_lock, flags);
452}
453 385
454static void ax_mii_expiry(unsigned long data) 386 u8 reg_gpoc = ax->plat->gpoc_val;
455{
456 struct net_device *dev = (struct net_device *)data;
457 struct ax_device *ax = to_ax_dev(dev);
458 unsigned long flags;
459 387
460 spin_lock_irqsave(&ax->mii_lock, flags); 388 if (!!on)
461 mii_check_media(&ax->mii, netif_msg_link(ax), 0); 389 reg_gpoc &= ~AX_GPOC_PPDSET;
462 spin_unlock_irqrestore(&ax->mii_lock, flags); 390 else
391 reg_gpoc |= AX_GPOC_PPDSET;
463 392
464 if (ax->running) { 393 ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
465 ax->mii_timer.expires = jiffies + HZ*2;
466 add_timer(&ax->mii_timer);
467 }
468} 394}
469 395
470static int ax_open(struct net_device *dev) 396static int ax_open(struct net_device *dev)
471{ 397{
472 struct ax_device *ax = to_ax_dev(dev); 398 struct ax_device *ax = to_ax_dev(dev);
473 struct ei_device *ei_local = netdev_priv(dev);
474 int ret; 399 int ret;
475 400
476 dev_dbg(&ax->dev->dev, "%s: open\n", dev->name); 401 netdev_dbg(dev, "open\n");
477 402
478 ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, 403 ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
479 dev->name, dev); 404 dev->name, dev);
480 if (ret) 405 if (ret)
481 return ret; 406 goto failed_request_irq;
482
483 ret = ax_ei_open(dev);
484 if (ret) {
485 free_irq(dev->irq, dev);
486 return ret;
487 }
488 407
489 /* turn the phy on (if turned off) */ 408 /* turn the phy on (if turned off) */
409 ax_phy_switch(dev, 1);
490 410
491 ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17)); 411 ret = ax_mii_probe(dev);
492 ax->running = 1; 412 if (ret)
493 413 goto failed_mii_probe;
494 /* start the MII timer */ 414 phy_start(ax->phy_dev);
495
496 init_timer(&ax->mii_timer);
497 415
498 ax->mii_timer.expires = jiffies+1; 416 ret = ax_ei_open(dev);
499 ax->mii_timer.data = (unsigned long) dev; 417 if (ret)
500 ax->mii_timer.function = ax_mii_expiry; 418 goto failed_ax_ei_open;
501 419
502 add_timer(&ax->mii_timer); 420 ax->running = 1;
503 421
504 return 0; 422 return 0;
423
424 failed_ax_ei_open:
425 phy_disconnect(ax->phy_dev);
426 failed_mii_probe:
427 ax_phy_switch(dev, 0);
428 free_irq(dev->irq, dev);
429 failed_request_irq:
430 return ret;
505} 431}
506 432
507static int ax_close(struct net_device *dev) 433static int ax_close(struct net_device *dev)
508{ 434{
509 struct ax_device *ax = to_ax_dev(dev); 435 struct ax_device *ax = to_ax_dev(dev);
510 struct ei_device *ei_local = netdev_priv(dev);
511 436
512 dev_dbg(&ax->dev->dev, "%s: close\n", dev->name); 437 netdev_dbg(dev, "close\n");
513
514 /* turn the phy off */
515
516 ei_outb(ax->plat->gpoc_val | (1<<6),
517 ei_local->mem + EI_SHIFT(0x17));
518 438
519 ax->running = 0; 439 ax->running = 0;
520 wmb(); 440 wmb();
521 441
522 del_timer_sync(&ax->mii_timer);
523 ax_ei_close(dev); 442 ax_ei_close(dev);
524 443
444 /* turn the phy off */
445 ax_phy_switch(dev, 0);
446 phy_disconnect(ax->phy_dev);
447
525 free_irq(dev->irq, dev); 448 free_irq(dev->irq, dev);
526 return 0; 449 return 0;
527} 450}
@@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev)
529static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 452static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
530{ 453{
531 struct ax_device *ax = to_ax_dev(dev); 454 struct ax_device *ax = to_ax_dev(dev);
532 unsigned long flags; 455 struct phy_device *phy_dev = ax->phy_dev;
533 int rc;
534 456
535 if (!netif_running(dev)) 457 if (!netif_running(dev))
536 return -EINVAL; 458 return -EINVAL;
537 459
538 spin_lock_irqsave(&ax->mii_lock, flags); 460 if (!phy_dev)
539 rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL); 461 return -ENODEV;
540 spin_unlock_irqrestore(&ax->mii_lock, flags);
541 462
542 return rc; 463 return phy_mii_ioctl(phy_dev, req, cmd);
543} 464}
544 465
545/* ethtool ops */ 466/* ethtool ops */
@@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
547static void ax_get_drvinfo(struct net_device *dev, 468static void ax_get_drvinfo(struct net_device *dev,
548 struct ethtool_drvinfo *info) 469 struct ethtool_drvinfo *info)
549{ 470{
550 struct ax_device *ax = to_ax_dev(dev); 471 struct platform_device *pdev = to_platform_device(dev->dev.parent);
551 472
552 strcpy(info->driver, DRV_NAME); 473 strcpy(info->driver, DRV_NAME);
553 strcpy(info->version, DRV_VERSION); 474 strcpy(info->version, DRV_VERSION);
554 strcpy(info->bus_info, ax->dev->name); 475 strcpy(info->bus_info, pdev->name);
555} 476}
556 477
557static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 478static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
558{ 479{
559 struct ax_device *ax = to_ax_dev(dev); 480 struct ax_device *ax = to_ax_dev(dev);
560 unsigned long flags; 481 struct phy_device *phy_dev = ax->phy_dev;
561 482
562 spin_lock_irqsave(&ax->mii_lock, flags); 483 if (!phy_dev)
563 mii_ethtool_gset(&ax->mii, cmd); 484 return -ENODEV;
564 spin_unlock_irqrestore(&ax->mii_lock, flags);
565 485
566 return 0; 486 return phy_ethtool_gset(phy_dev, cmd);
567} 487}
568 488
569static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 489static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
570{ 490{
571 struct ax_device *ax = to_ax_dev(dev); 491 struct ax_device *ax = to_ax_dev(dev);
572 unsigned long flags; 492 struct phy_device *phy_dev = ax->phy_dev;
573 int rc;
574 493
575 spin_lock_irqsave(&ax->mii_lock, flags); 494 if (!phy_dev)
576 rc = mii_ethtool_sset(&ax->mii, cmd); 495 return -ENODEV;
577 spin_unlock_irqrestore(&ax->mii_lock, flags);
578
579 return rc;
580}
581
582static int ax_nway_reset(struct net_device *dev)
583{
584 struct ax_device *ax = to_ax_dev(dev);
585 return mii_nway_restart(&ax->mii);
586}
587 496
588static u32 ax_get_link(struct net_device *dev) 497 return phy_ethtool_sset(phy_dev, cmd);
589{
590 struct ax_device *ax = to_ax_dev(dev);
591 return mii_link_ok(&ax->mii);
592} 498}
593 499
594static const struct ethtool_ops ax_ethtool_ops = { 500static const struct ethtool_ops ax_ethtool_ops = {
595 .get_drvinfo = ax_get_drvinfo, 501 .get_drvinfo = ax_get_drvinfo,
596 .get_settings = ax_get_settings, 502 .get_settings = ax_get_settings,
597 .set_settings = ax_set_settings, 503 .set_settings = ax_set_settings,
598 .nway_reset = ax_nway_reset, 504 .get_link = ethtool_op_get_link,
599 .get_link = ax_get_link,
600}; 505};
601 506
602#ifdef CONFIG_AX88796_93CX6 507#ifdef CONFIG_AX88796_93CX6
@@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = {
640 .ndo_get_stats = ax_ei_get_stats, 545 .ndo_get_stats = ax_ei_get_stats,
641 .ndo_set_multicast_list = ax_ei_set_multicast_list, 546 .ndo_set_multicast_list = ax_ei_set_multicast_list,
642 .ndo_validate_addr = eth_validate_addr, 547 .ndo_validate_addr = eth_validate_addr,
643 .ndo_set_mac_address = eth_mac_addr, 548 .ndo_set_mac_address = eth_mac_addr,
644 .ndo_change_mtu = eth_change_mtu, 549 .ndo_change_mtu = eth_change_mtu,
645#ifdef CONFIG_NET_POLL_CONTROLLER 550#ifdef CONFIG_NET_POLL_CONTROLLER
646 .ndo_poll_controller = ax_ei_poll, 551 .ndo_poll_controller = ax_ei_poll,
647#endif 552#endif
648}; 553};
649 554
555static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
556{
557 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
558
559 if (level)
560 ax->reg_memr |= AX_MEMR_MDC;
561 else
562 ax->reg_memr &= ~AX_MEMR_MDC;
563
564 ei_outb(ax->reg_memr, ax->addr_memr);
565}
566
567static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
568{
569 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
570
571 if (output)
572 ax->reg_memr &= ~AX_MEMR_MDIR;
573 else
574 ax->reg_memr |= AX_MEMR_MDIR;
575
576 ei_outb(ax->reg_memr, ax->addr_memr);
577}
578
579static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
580{
581 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
582
583 if (value)
584 ax->reg_memr |= AX_MEMR_MDO;
585 else
586 ax->reg_memr &= ~AX_MEMR_MDO;
587
588 ei_outb(ax->reg_memr, ax->addr_memr);
589}
590
591static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
592{
593 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
594 int reg_memr = ei_inb(ax->addr_memr);
595
596 return reg_memr & AX_MEMR_MDI ? 1 : 0;
597}
598
599static struct mdiobb_ops bb_ops = {
600 .owner = THIS_MODULE,
601 .set_mdc = ax_bb_mdc,
602 .set_mdio_dir = ax_bb_dir,
603 .set_mdio_data = ax_bb_set_data,
604 .get_mdio_data = ax_bb_get_data,
605};
606
650/* setup code */ 607/* setup code */
651 608
609static int ax_mii_init(struct net_device *dev)
610{
611 struct platform_device *pdev = to_platform_device(dev->dev.parent);
612 struct ei_device *ei_local = netdev_priv(dev);
613 struct ax_device *ax = to_ax_dev(dev);
614 int err, i;
615
616 ax->bb_ctrl.ops = &bb_ops;
617 ax->addr_memr = ei_local->mem + AX_MEMR;
618 ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
619 if (!ax->mii_bus) {
620 err = -ENOMEM;
621 goto out;
622 }
623
624 ax->mii_bus->name = "ax88796_mii_bus";
625 ax->mii_bus->parent = dev->dev.parent;
626 snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
627
628 ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
629 if (!ax->mii_bus->irq) {
630 err = -ENOMEM;
631 goto out_free_mdio_bitbang;
632 }
633
634 for (i = 0; i < PHY_MAX_ADDR; i++)
635 ax->mii_bus->irq[i] = PHY_POLL;
636
637 err = mdiobus_register(ax->mii_bus);
638 if (err)
639 goto out_free_irq;
640
641 return 0;
642
643 out_free_irq:
644 kfree(ax->mii_bus->irq);
645 out_free_mdio_bitbang:
646 free_mdio_bitbang(ax->mii_bus);
647 out:
648 return err;
649}
650
652static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) 651static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
653{ 652{
654 void __iomem *ioaddr = ei_local->mem; 653 void __iomem *ioaddr = ei_local->mem;
655 struct ax_device *ax = to_ax_dev(dev); 654 struct ax_device *ax = to_ax_dev(dev);
656 655
657 /* Select page 0*/ 656 /* Select page 0 */
658 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD); 657 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
659 658
660 /* set to byte access */ 659 /* set to byte access */
661 ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG); 660 ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
662 ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17)); 661 ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
663} 662}
664 663
665/* ax_init_dev 664/*
665 * ax_init_dev
666 * 666 *
667 * initialise the specified device, taking care to note the MAC 667 * initialise the specified device, taking care to note the MAC
668 * address it may already have (if configured), ensure 668 * address it may already have (if configured), ensure
669 * the device is ready to be used by lib8390.c and registerd with 669 * the device is ready to be used by lib8390.c and registerd with
670 * the network layer. 670 * the network layer.
671 */ 671 */
672 672static int ax_init_dev(struct net_device *dev)
673static int ax_init_dev(struct net_device *dev, int first_init)
674{ 673{
675 struct ei_device *ei_local = netdev_priv(dev); 674 struct ei_device *ei_local = netdev_priv(dev);
676 struct ax_device *ax = to_ax_dev(dev); 675 struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init)
690 689
691 /* read the mac from the card prom if we need it */ 690 /* read the mac from the card prom if we need it */
692 691
693 if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) { 692 if (ax->plat->flags & AXFLG_HAS_EEPROM) {
694 unsigned char SA_prom[32]; 693 unsigned char SA_prom[32];
695 694
696 for(i = 0; i < sizeof(SA_prom); i+=2) { 695 for (i = 0; i < sizeof(SA_prom); i += 2) {
697 SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); 696 SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
698 SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT); 697 SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
699 } 698 }
700 699
701 if (ax->plat->wordlength == 2) 700 if (ax->plat->wordlength == 2)
702 for (i = 0; i < 16; i++) 701 for (i = 0; i < 16; i++)
703 SA_prom[i] = SA_prom[i+i]; 702 SA_prom[i] = SA_prom[i+i];
704 703
705 memcpy(dev->dev_addr, SA_prom, 6); 704 memcpy(dev->dev_addr, SA_prom, 6);
706 } 705 }
707 706
708#ifdef CONFIG_AX88796_93CX6 707#ifdef CONFIG_AX88796_93CX6
709 if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) { 708 if (ax->plat->flags & AXFLG_HAS_93CX6) {
710 unsigned char mac_addr[6]; 709 unsigned char mac_addr[6];
711 struct eeprom_93cx6 eeprom; 710 struct eeprom_93cx6 eeprom;
712 711
@@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
719 (__le16 __force *)mac_addr, 718 (__le16 __force *)mac_addr,
720 sizeof(mac_addr) >> 1); 719 sizeof(mac_addr) >> 1);
721 720
722 memcpy(dev->dev_addr, mac_addr, 6); 721 memcpy(dev->dev_addr, mac_addr, 6);
723 } 722 }
724#endif 723#endif
725 if (ax->plat->wordlength == 2) { 724 if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init)
732 stop_page = NE1SM_STOP_PG; 731 stop_page = NE1SM_STOP_PG;
733 } 732 }
734 733
735 /* load the mac-address from the device if this is the 734 /* load the mac-address from the device */
736 * first time we've initialised */ 735 if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
737 736 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
738 if (first_init) { 737 ei_local->mem + E8390_CMD); /* 0x61 */
739 if (ax->plat->flags & AXFLG_MAC_FROMDEV) { 738 for (i = 0; i < ETHER_ADDR_LEN; i++)
740 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, 739 dev->dev_addr[i] =
741 ei_local->mem + E8390_CMD); /* 0x61 */ 740 ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
742 for (i = 0; i < ETHER_ADDR_LEN; i++)
743 dev->dev_addr[i] =
744 ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
745 }
746
747 if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
748 ax->plat->mac_addr)
749 memcpy(dev->dev_addr, ax->plat->mac_addr,
750 ETHER_ADDR_LEN);
751 } 741 }
752 742
743 if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
744 ax->plat->mac_addr)
745 memcpy(dev->dev_addr, ax->plat->mac_addr,
746 ETHER_ADDR_LEN);
747
753 ax_reset_8390(dev); 748 ax_reset_8390(dev);
754 749
755 ei_status.name = "AX88796"; 750 ei_local->name = "AX88796";
756 ei_status.tx_start_page = start_page; 751 ei_local->tx_start_page = start_page;
757 ei_status.stop_page = stop_page; 752 ei_local->stop_page = stop_page;
758 ei_status.word16 = (ax->plat->wordlength == 2); 753 ei_local->word16 = (ax->plat->wordlength == 2);
759 ei_status.rx_start_page = start_page + TX_PAGES; 754 ei_local->rx_start_page = start_page + TX_PAGES;
760 755
761#ifdef PACKETBUF_MEMSIZE 756#ifdef PACKETBUF_MEMSIZE
762 /* Allow the packet buffer size to be overridden by know-it-alls. */ 757 /* Allow the packet buffer size to be overridden by know-it-alls. */
763 ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; 758 ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
764#endif 759#endif
765 760
766 ei_status.reset_8390 = &ax_reset_8390; 761 ei_local->reset_8390 = &ax_reset_8390;
767 ei_status.block_input = &ax_block_input; 762 ei_local->block_input = &ax_block_input;
768 ei_status.block_output = &ax_block_output; 763 ei_local->block_output = &ax_block_output;
769 ei_status.get_8390_hdr = &ax_get_8390_hdr; 764 ei_local->get_8390_hdr = &ax_get_8390_hdr;
770 ei_status.priv = 0; 765 ei_local->priv = 0;
771
772 dev->netdev_ops = &ax_netdev_ops;
773 dev->ethtool_ops = &ax_ethtool_ops;
774
775 ax->msg_enable = NETIF_MSG_LINK;
776 ax->mii.phy_id_mask = 0x1f;
777 ax->mii.reg_num_mask = 0x1f;
778 ax->mii.phy_id = 0x10; /* onboard phy */
779 ax->mii.force_media = 0;
780 ax->mii.full_duplex = 0;
781 ax->mii.mdio_read = ax_phy_read;
782 ax->mii.mdio_write = ax_phy_write;
783 ax->mii.dev = dev;
784 766
785 ax_NS8390_init(dev, 0); 767 dev->netdev_ops = &ax_netdev_ops;
768 dev->ethtool_ops = &ax_ethtool_ops;
786 769
787 if (first_init) 770 ret = ax_mii_init(dev);
788 dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n", 771 if (ret)
789 ei_status.word16 ? 16:8, dev->irq, dev->base_addr, 772 goto out_irq;
790 dev->dev_addr); 773
774 ax_NS8390_init(dev, 0);
791 775
792 ret = register_netdev(dev); 776 ret = register_netdev(dev);
793 if (ret) 777 if (ret)
794 goto out_irq; 778 goto out_irq;
795 779
780 netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
781 ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
782 dev->dev_addr);
783
796 return 0; 784 return 0;
797 785
798 out_irq: 786 out_irq:
@@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init)
802 return ret; 790 return ret;
803} 791}
804 792
805static int ax_remove(struct platform_device *_dev) 793static int ax_remove(struct platform_device *pdev)
806{ 794{
807 struct net_device *dev = platform_get_drvdata(_dev); 795 struct net_device *dev = platform_get_drvdata(pdev);
808 struct ax_device *ax; 796 struct ei_device *ei_local = netdev_priv(dev);
809 797 struct ax_device *ax = to_ax_dev(dev);
810 ax = to_ax_dev(dev); 798 struct resource *mem;
811 799
812 unregister_netdev(dev); 800 unregister_netdev(dev);
813 free_irq(dev->irq, dev); 801 free_irq(dev->irq, dev);
814 802
815 iounmap(ei_status.mem); 803 iounmap(ei_local->mem);
816 release_resource(ax->mem); 804 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
817 kfree(ax->mem); 805 release_mem_region(mem->start, resource_size(mem));
818 806
819 if (ax->map2) { 807 if (ax->map2) {
820 iounmap(ax->map2); 808 iounmap(ax->map2);
821 release_resource(ax->mem2); 809 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
822 kfree(ax->mem2); 810 release_mem_region(mem->start, resource_size(mem));
823 } 811 }
824 812
825 free_netdev(dev); 813 free_netdev(dev);
@@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev)
827 return 0; 815 return 0;
828} 816}
829 817
830/* ax_probe 818/*
819 * ax_probe
831 * 820 *
832 * This is the entry point when the platform device system uses to 821 * This is the entry point when the platform device system uses to
833 * notify us of a new device to attach to. Allocate memory, find 822 * notify us of a new device to attach to. Allocate memory, find the
834 * the resources and information passed, and map the necessary registers. 823 * resources and information passed, and map the necessary registers.
835*/ 824 */
836
837static int ax_probe(struct platform_device *pdev) 825static int ax_probe(struct platform_device *pdev)
838{ 826{
839 struct net_device *dev; 827 struct net_device *dev;
840 struct ax_device *ax; 828 struct ei_device *ei_local;
841 struct resource *res; 829 struct ax_device *ax;
842 size_t size; 830 struct resource *irq, *mem, *mem2;
831 resource_size_t mem_size, mem2_size = 0;
843 int ret = 0; 832 int ret = 0;
844 833
845 dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); 834 dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev)
847 return -ENOMEM; 836 return -ENOMEM;
848 837
849 /* ok, let's setup our device */ 838 /* ok, let's setup our device */
839 SET_NETDEV_DEV(dev, &pdev->dev);
840 ei_local = netdev_priv(dev);
850 ax = to_ax_dev(dev); 841 ax = to_ax_dev(dev);
851 842
852 memset(ax, 0, sizeof(struct ax_device));
853
854 spin_lock_init(&ax->mii_lock);
855
856 ax->dev = pdev;
857 ax->plat = pdev->dev.platform_data; 843 ax->plat = pdev->dev.platform_data;
858 platform_set_drvdata(pdev, dev); 844 platform_set_drvdata(pdev, dev);
859 845
860 ei_status.rxcr_base = ax->plat->rcr_val; 846 ei_local->rxcr_base = ax->plat->rcr_val;
861 847
862 /* find the platform resources */ 848 /* find the platform resources */
863 849 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
864 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 850 if (!irq) {
865 if (res == NULL) {
866 dev_err(&pdev->dev, "no IRQ specified\n"); 851 dev_err(&pdev->dev, "no IRQ specified\n");
867 ret = -ENXIO; 852 ret = -ENXIO;
868 goto exit_mem; 853 goto exit_mem;
869 } 854 }
870 855
871 dev->irq = res->start; 856 dev->irq = irq->start;
872 ax->irqflags = res->flags & IRQF_TRIGGER_MASK; 857 ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
873 858
874 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 859 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
875 if (res == NULL) { 860 if (!mem) {
876 dev_err(&pdev->dev, "no MEM specified\n"); 861 dev_err(&pdev->dev, "no MEM specified\n");
877 ret = -ENXIO; 862 ret = -ENXIO;
878 goto exit_mem; 863 goto exit_mem;
879 } 864 }
880 865
881 size = (res->end - res->start) + 1; 866 mem_size = resource_size(mem);
882
883 /* setup the register offsets from either the platform data
884 * or by using the size of the resource provided */
885 867
868 /*
869 * setup the register offsets from either the platform data or
870 * by using the size of the resource provided
871 */
886 if (ax->plat->reg_offsets) 872 if (ax->plat->reg_offsets)
887 ei_status.reg_offset = ax->plat->reg_offsets; 873 ei_local->reg_offset = ax->plat->reg_offsets;
888 else { 874 else {
889 ei_status.reg_offset = ax->reg_offsets; 875 ei_local->reg_offset = ax->reg_offsets;
890 for (ret = 0; ret < 0x18; ret++) 876 for (ret = 0; ret < 0x18; ret++)
891 ax->reg_offsets[ret] = (size / 0x18) * ret; 877 ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
892 } 878 }
893 879
894 ax->mem = request_mem_region(res->start, size, pdev->name); 880 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
895 if (ax->mem == NULL) {
896 dev_err(&pdev->dev, "cannot reserve registers\n"); 881 dev_err(&pdev->dev, "cannot reserve registers\n");
897 ret = -ENXIO; 882 ret = -ENXIO;
898 goto exit_mem; 883 goto exit_mem;
899 } 884 }
900 885
901 ei_status.mem = ioremap(res->start, size); 886 ei_local->mem = ioremap(mem->start, mem_size);
902 dev->base_addr = (unsigned long)ei_status.mem; 887 dev->base_addr = (unsigned long)ei_local->mem;
903 888
904 if (ei_status.mem == NULL) { 889 if (ei_local->mem == NULL) {
905 dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n", 890 dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
906 (unsigned long long)res->start,
907 (unsigned long long)res->end);
908 891
909 ret = -ENXIO; 892 ret = -ENXIO;
910 goto exit_req; 893 goto exit_req;
911 } 894 }
912 895
913 /* look for reset area */ 896 /* look for reset area */
914 897 mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
915 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 898 if (!mem2) {
916 if (res == NULL) {
917 if (!ax->plat->reg_offsets) { 899 if (!ax->plat->reg_offsets) {
918 for (ret = 0; ret < 0x20; ret++) 900 for (ret = 0; ret < 0x20; ret++)
919 ax->reg_offsets[ret] = (size / 0x20) * ret; 901 ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
920 } 902 }
921
922 ax->map2 = NULL;
923 } else { 903 } else {
924 size = (res->end - res->start) + 1; 904 mem2_size = resource_size(mem2);
925 905
926 ax->mem2 = request_mem_region(res->start, size, pdev->name); 906 if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
927 if (ax->mem2 == NULL) {
928 dev_err(&pdev->dev, "cannot reserve registers\n"); 907 dev_err(&pdev->dev, "cannot reserve registers\n");
929 ret = -ENXIO; 908 ret = -ENXIO;
930 goto exit_mem1; 909 goto exit_mem1;
931 } 910 }
932 911
933 ax->map2 = ioremap(res->start, size); 912 ax->map2 = ioremap(mem2->start, mem2_size);
934 if (ax->map2 == NULL) { 913 if (!ax->map2) {
935 dev_err(&pdev->dev, "cannot map reset register\n"); 914 dev_err(&pdev->dev, "cannot map reset register\n");
936 ret = -ENXIO; 915 ret = -ENXIO;
937 goto exit_mem2; 916 goto exit_mem2;
938 } 917 }
939 918
940 ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem; 919 ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
941 } 920 }
942 921
943 /* got resources, now initialise and register device */ 922 /* got resources, now initialise and register device */
944 923 ret = ax_init_dev(dev);
945 ret = ax_init_dev(dev, 1);
946 if (!ret) 924 if (!ret)
947 return 0; 925 return 0;
948 926
949 if (ax->map2 == NULL) 927 if (!ax->map2)
950 goto exit_mem1; 928 goto exit_mem1;
951 929
952 iounmap(ax->map2); 930 iounmap(ax->map2);
953 931
954 exit_mem2: 932 exit_mem2:
955 release_resource(ax->mem2); 933 release_mem_region(mem2->start, mem2_size);
956 kfree(ax->mem2);
957 934
958 exit_mem1: 935 exit_mem1:
959 iounmap(ei_status.mem); 936 iounmap(ei_local->mem);
960 937
961 exit_req: 938 exit_req:
962 release_resource(ax->mem); 939 release_mem_region(mem->start, mem_size);
963 kfree(ax->mem);
964 940
965 exit_mem: 941 exit_mem:
966 free_netdev(dev); 942 free_netdev(dev);
@@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev)
974static int ax_suspend(struct platform_device *dev, pm_message_t state) 950static int ax_suspend(struct platform_device *dev, pm_message_t state)
975{ 951{
976 struct net_device *ndev = platform_get_drvdata(dev); 952 struct net_device *ndev = platform_get_drvdata(dev);
977 struct ax_device *ax = to_ax_dev(ndev); 953 struct ax_device *ax = to_ax_dev(ndev);
978 954
979 ax->resume_open = ax->running; 955 ax->resume_open = ax->running;
980 956
@@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state)
987static int ax_resume(struct platform_device *pdev) 963static int ax_resume(struct platform_device *pdev)
988{ 964{
989 struct net_device *ndev = platform_get_drvdata(pdev); 965 struct net_device *ndev = platform_get_drvdata(pdev);
990 struct ax_device *ax = to_ax_dev(ndev); 966 struct ax_device *ax = to_ax_dev(ndev);
991 967
992 ax_initial_setup(ndev, netdev_priv(ndev)); 968 ax_initial_setup(ndev, netdev_priv(ndev));
993 ax_NS8390_init(ndev, ax->resume_open); 969 ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev)
1001 977
1002#else 978#else
1003#define ax_suspend NULL 979#define ax_suspend NULL
1004#define ax_resume NULL 980#define ax_resume NULL
1005#endif 981#endif
1006 982
1007static struct platform_driver axdrv = { 983static struct platform_driver axdrv = {
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index add0b93350dd..f803c58b941d 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#ifndef BE_H 18#ifndef BE_H
@@ -33,7 +33,7 @@
33 33
34#include "be_hw.h" 34#include "be_hw.h"
35 35
36#define DRV_VER "2.103.175u" 36#define DRV_VER "4.0.100u"
37#define DRV_NAME "be2net" 37#define DRV_NAME "be2net"
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -67,7 +67,7 @@ static inline char *nic_name(struct pci_dev *pdev)
67} 67}
68 68
69/* Number of bytes of an RX frame that are copied to skb->data */ 69/* Number of bytes of an RX frame that are copied to skb->data */
70#define BE_HDR_LEN 64 70#define BE_HDR_LEN ((u16) 64)
71#define BE_MAX_JUMBO_FRAME_SIZE 9018 71#define BE_MAX_JUMBO_FRAME_SIZE 9018
72#define BE_MIN_MTU 256 72#define BE_MIN_MTU 256
73 73
@@ -211,18 +211,40 @@ struct be_rx_stats {
211 u32 rx_fps; /* Rx frags per second */ 211 u32 rx_fps; /* Rx frags per second */
212}; 212};
213 213
214struct be_rx_compl_info {
215 u32 rss_hash;
216 u16 vid;
217 u16 pkt_size;
218 u16 rxq_idx;
219 u16 mac_id;
220 u8 vlanf;
221 u8 num_rcvd;
222 u8 err;
223 u8 ipf;
224 u8 tcpf;
225 u8 udpf;
226 u8 ip_csum;
227 u8 l4_csum;
228 u8 ipv6;
229 u8 vtm;
230 u8 pkt_type;
231};
232
214struct be_rx_obj { 233struct be_rx_obj {
215 struct be_adapter *adapter; 234 struct be_adapter *adapter;
216 struct be_queue_info q; 235 struct be_queue_info q;
217 struct be_queue_info cq; 236 struct be_queue_info cq;
237 struct be_rx_compl_info rxcp;
218 struct be_rx_page_info page_info_tbl[RX_Q_LEN]; 238 struct be_rx_page_info page_info_tbl[RX_Q_LEN];
219 struct be_eq_obj rx_eq; 239 struct be_eq_obj rx_eq;
220 struct be_rx_stats stats; 240 struct be_rx_stats stats;
221 u8 rss_id; 241 u8 rss_id;
222 bool rx_post_starved; /* Zero rx frags have been posted to BE */ 242 bool rx_post_starved; /* Zero rx frags have been posted to BE */
223 u16 last_frag_index; 243 u32 cache_line_barrier[16];
224 u16 rsvd; 244};
225 u32 cache_line_barrier[15]; 245
246struct be_drv_stats {
247 u8 be_on_die_temperature;
226}; 248};
227 249
228struct be_vf_cfg { 250struct be_vf_cfg {
@@ -234,6 +256,7 @@ struct be_vf_cfg {
234}; 256};
235 257
236#define BE_INVALID_PMAC_ID 0xffffffff 258#define BE_INVALID_PMAC_ID 0xffffffff
259
237struct be_adapter { 260struct be_adapter {
238 struct pci_dev *pdev; 261 struct pci_dev *pdev;
239 struct net_device *netdev; 262 struct net_device *netdev;
@@ -269,6 +292,7 @@ struct be_adapter {
269 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 292 u32 big_page_size; /* Compounded page size shared by rx wrbs */
270 293
271 u8 msix_vec_next_idx; 294 u8 msix_vec_next_idx;
295 struct be_drv_stats drv_stats;
272 296
273 struct vlan_group *vlan_grp; 297 struct vlan_group *vlan_grp;
274 u16 vlans_added; 298 u16 vlans_added;
@@ -281,6 +305,7 @@ struct be_adapter {
281 struct be_dma_mem stats_cmd; 305 struct be_dma_mem stats_cmd;
282 /* Work queue used to perform periodic tasks like getting statistics */ 306 /* Work queue used to perform periodic tasks like getting statistics */
283 struct delayed_work work; 307 struct delayed_work work;
308 u16 work_counter;
284 309
285 /* Ethtool knobs and info */ 310 /* Ethtool knobs and info */
286 bool rx_csum; /* BE card must perform rx-checksumming */ 311 bool rx_csum; /* BE card must perform rx-checksumming */
@@ -298,7 +323,7 @@ struct be_adapter {
298 u32 rx_fc; /* Rx flow control */ 323 u32 rx_fc; /* Rx flow control */
299 u32 tx_fc; /* Tx flow control */ 324 u32 tx_fc; /* Tx flow control */
300 bool ue_detected; 325 bool ue_detected;
301 bool stats_ioctl_sent; 326 bool stats_cmd_sent;
302 int link_speed; 327 int link_speed;
303 u8 port_type; 328 u8 port_type;
304 u8 transceiver; 329 u8 transceiver;
@@ -307,10 +332,13 @@ struct be_adapter {
307 u32 flash_status; 332 u32 flash_status;
308 struct completion flash_compl; 333 struct completion flash_compl;
309 334
335 bool be3_native;
310 bool sriov_enabled; 336 bool sriov_enabled;
311 struct be_vf_cfg vf_cfg[BE_MAX_VF]; 337 struct be_vf_cfg vf_cfg[BE_MAX_VF];
312 u8 is_virtfn; 338 u8 is_virtfn;
313 u32 sli_family; 339 u32 sli_family;
340 u8 hba_port_num;
341 u16 pvid;
314}; 342};
315 343
316#define be_physfn(adapter) (!adapter->is_virtfn) 344#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -450,9 +478,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
450 mac[5] = (u8)(addr & 0xFF); 478 mac[5] = (u8)(addr & 0xFF);
451 mac[4] = (u8)((addr >> 8) & 0xFF); 479 mac[4] = (u8)((addr >> 8) & 0xFF);
452 mac[3] = (u8)((addr >> 16) & 0xFF); 480 mac[3] = (u8)((addr >> 16) & 0xFF);
453 mac[2] = 0xC9; 481 /* Use the OUI from the current MAC address */
454 mac[1] = 0x00; 482 memcpy(mac, adapter->netdev->dev_addr, 3);
455 mac[0] = 0x00;
456} 483}
457 484
458extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 485extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index a179cc6d79f2..5a4a87e7c5ea 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,21 +8,30 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
19#include "be_cmds.h" 19#include "be_cmds.h"
20 20
21/* Must be a power of 2 or else MODULO will BUG_ON */
22static int be_get_temp_freq = 32;
23
21static void be_mcc_notify(struct be_adapter *adapter) 24static void be_mcc_notify(struct be_adapter *adapter)
22{ 25{
23 struct be_queue_info *mccq = &adapter->mcc_obj.q; 26 struct be_queue_info *mccq = &adapter->mcc_obj.q;
24 u32 val = 0; 27 u32 val = 0;
25 28
29 if (adapter->eeh_err) {
30 dev_info(&adapter->pdev->dev,
31 "Error in Card Detected! Cannot issue commands\n");
32 return;
33 }
34
26 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 35 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 36 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28 37
@@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
75 be_dws_le_to_cpu(&resp->hw_stats, 84 be_dws_le_to_cpu(&resp->hw_stats,
76 sizeof(resp->hw_stats)); 85 sizeof(resp->hw_stats));
77 netdev_stats_update(adapter); 86 netdev_stats_update(adapter);
78 adapter->stats_ioctl_sent = false; 87 adapter->stats_cmd_sent = false;
79 } 88 }
80 } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) && 89 } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
81 (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) { 90 (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
102{ 111{
103 if (evt->valid) { 112 if (evt->valid) {
104 adapter->vlan_prio_bmap = evt->available_priority_bmap; 113 adapter->vlan_prio_bmap = evt->available_priority_bmap;
114 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
105 adapter->recommended_prio = 115 adapter->recommended_prio =
106 evt->reco_default_priority << VLAN_PRIO_SHIFT; 116 evt->reco_default_priority << VLAN_PRIO_SHIFT;
107 } 117 }
@@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
117 } 127 }
118} 128}
119 129
130/*Grp5 PVID evt*/
131static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
132 struct be_async_event_grp5_pvid_state *evt)
133{
134 if (evt->enabled)
135 adapter->pvid = evt->tag;
136 else
137 adapter->pvid = 0;
138}
139
120static void be_async_grp5_evt_process(struct be_adapter *adapter, 140static void be_async_grp5_evt_process(struct be_adapter *adapter,
121 u32 trailer, struct be_mcc_compl *evt) 141 u32 trailer, struct be_mcc_compl *evt)
122{ 142{
@@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
134 be_async_grp5_qos_speed_process(adapter, 154 be_async_grp5_qos_speed_process(adapter,
135 (struct be_async_event_grp5_qos_link_speed *)evt); 155 (struct be_async_event_grp5_qos_link_speed *)evt);
136 break; 156 break;
157 case ASYNC_EVENT_PVID_STATE:
158 be_async_grp5_pvid_state_process(adapter,
159 (struct be_async_event_grp5_pvid_state *)evt);
160 break;
137 default: 161 default:
138 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); 162 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
139 break; 163 break;
@@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
216 int i, num, status = 0; 240 int i, num, status = 0;
217 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 241 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
218 242
243 if (adapter->eeh_err)
244 return -EIO;
245
219 for (i = 0; i < mcc_timeout; i++) { 246 for (i = 0; i < mcc_timeout; i++) {
220 num = be_process_mcc(adapter, &status); 247 num = be_process_mcc(adapter, &status);
221 if (num) 248 if (num)
@@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
245 int msecs = 0; 272 int msecs = 0;
246 u32 ready; 273 u32 ready;
247 274
275 if (adapter->eeh_err) {
276 dev_err(&adapter->pdev->dev,
277 "Error detected in card.Cannot issue commands\n");
278 return -EIO;
279 }
280
248 do { 281 do {
249 ready = ioread32(db); 282 ready = ioread32(db);
250 if (ready == 0xffffffff) { 283 if (ready == 0xffffffff) {
@@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
598 631
599/* Uses synchronous MCCQ */ 632/* Uses synchronous MCCQ */
600int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 633int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
601 u32 if_id, u32 *pmac_id) 634 u32 if_id, u32 *pmac_id, u32 domain)
602{ 635{
603 struct be_mcc_wrb *wrb; 636 struct be_mcc_wrb *wrb;
604 struct be_cmd_req_pmac_add *req; 637 struct be_cmd_req_pmac_add *req;
@@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
619 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 652 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
620 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); 653 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
621 654
655 req->hdr.domain = domain;
622 req->if_id = cpu_to_le32(if_id); 656 req->if_id = cpu_to_le32(if_id);
623 memcpy(req->mac_address, mac_addr, ETH_ALEN); 657 memcpy(req->mac_address, mac_addr, ETH_ALEN);
624 658
@@ -634,7 +668,7 @@ err:
634} 668}
635 669
636/* Uses synchronous MCCQ */ 670/* Uses synchronous MCCQ */
637int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) 671int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
638{ 672{
639 struct be_mcc_wrb *wrb; 673 struct be_mcc_wrb *wrb;
640 struct be_cmd_req_pmac_del *req; 674 struct be_cmd_req_pmac_del *req;
@@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
655 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 689 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
656 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); 690 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
657 691
692 req->hdr.domain = dom;
658 req->if_id = cpu_to_le32(if_id); 693 req->if_id = cpu_to_le32(if_id);
659 req->pmac_id = cpu_to_le32(pmac_id); 694 req->pmac_id = cpu_to_le32(pmac_id);
660 695
@@ -691,7 +726,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
691 726
692 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 727 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
693 if (lancer_chip(adapter)) { 728 if (lancer_chip(adapter)) {
694 req->hdr.version = 1; 729 req->hdr.version = 2;
695 req->page_size = 1; /* 1 for 4K */ 730 req->page_size = 1; /* 1 for 4K */
696 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt, 731 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
697 coalesce_wm); 732 coalesce_wm);
@@ -827,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter,
827 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, 862 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
828 sizeof(*req)); 863 sizeof(*req));
829 864
865 if (lancer_chip(adapter)) {
866 req->hdr.version = 1;
867 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
868 adapter->if_handle);
869 }
870
830 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 871 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
831 req->ulp_num = BE_ULP1_NUM; 872 req->ulp_num = BE_ULP1_NUM;
832 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 873 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -995,7 +1036,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
995} 1036}
996 1037
997/* Uses mbox */ 1038/* Uses mbox */
998int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) 1039int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
999{ 1040{
1000 struct be_mcc_wrb *wrb; 1041 struct be_mcc_wrb *wrb;
1001 struct be_cmd_req_if_destroy *req; 1042 struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1057,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
1016 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1057 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1017 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); 1058 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1018 1059
1060 req->hdr.domain = domain;
1019 req->interface_id = cpu_to_le32(interface_id); 1061 req->interface_id = cpu_to_le32(interface_id);
1020 1062
1021 status = be_mbox_notify_wait(adapter); 1063 status = be_mbox_notify_wait(adapter);
@@ -1036,6 +1078,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1036 struct be_sge *sge; 1078 struct be_sge *sge;
1037 int status = 0; 1079 int status = 0;
1038 1080
1081 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1082 be_cmd_get_die_temperature(adapter);
1083
1039 spin_lock_bh(&adapter->mcc_lock); 1084 spin_lock_bh(&adapter->mcc_lock);
1040 1085
1041 wrb = wrb_from_mccq(adapter); 1086 wrb = wrb_from_mccq(adapter);
@@ -1056,7 +1101,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1056 sge->len = cpu_to_le32(nonemb_cmd->size); 1101 sge->len = cpu_to_le32(nonemb_cmd->size);
1057 1102
1058 be_mcc_notify(adapter); 1103 be_mcc_notify(adapter);
1059 adapter->stats_ioctl_sent = true; 1104 adapter->stats_cmd_sent = true;
1060 1105
1061err: 1106err:
1062 spin_unlock_bh(&adapter->mcc_lock); 1107 spin_unlock_bh(&adapter->mcc_lock);
@@ -1103,6 +1148,44 @@ err:
1103 return status; 1148 return status;
1104} 1149}
1105 1150
1151/* Uses synchronous mcc */
1152int be_cmd_get_die_temperature(struct be_adapter *adapter)
1153{
1154 struct be_mcc_wrb *wrb;
1155 struct be_cmd_req_get_cntl_addnl_attribs *req;
1156 int status;
1157
1158 spin_lock_bh(&adapter->mcc_lock);
1159
1160 wrb = wrb_from_mccq(adapter);
1161 if (!wrb) {
1162 status = -EBUSY;
1163 goto err;
1164 }
1165 req = embedded_payload(wrb);
1166
1167 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1168 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
1169
1170 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1171 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
1172
1173 status = be_mcc_notify_wait(adapter);
1174 if (!status) {
1175 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
1176 embedded_payload(wrb);
1177 adapter->drv_stats.be_on_die_temperature =
1178 resp->on_die_temperature;
1179 }
1180 /* If IOCTL fails once, do not bother issuing it again */
1181 else
1182 be_get_temp_freq = 0;
1183
1184err:
1185 spin_unlock_bh(&adapter->mcc_lock);
1186 return status;
1187}
1188
1106/* Uses Mbox */ 1189/* Uses Mbox */
1107int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) 1190int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1108{ 1191{
@@ -1868,8 +1951,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
1868 OPCODE_COMMON_SET_QOS, sizeof(*req)); 1951 OPCODE_COMMON_SET_QOS, sizeof(*req));
1869 1952
1870 req->hdr.domain = domain; 1953 req->hdr.domain = domain;
1871 req->valid_bits = BE_QOS_BITS_NIC; 1954 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
1872 req->max_bps_nic = bps; 1955 req->max_bps_nic = cpu_to_le32(bps);
1873 1956
1874 status = be_mcc_notify_wait(adapter); 1957 status = be_mcc_notify_wait(adapter);
1875 1958
@@ -1877,3 +1960,96 @@ err:
1877 spin_unlock_bh(&adapter->mcc_lock); 1960 spin_unlock_bh(&adapter->mcc_lock);
1878 return status; 1961 return status;
1879} 1962}
1963
1964int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
1965{
1966 struct be_mcc_wrb *wrb;
1967 struct be_cmd_req_cntl_attribs *req;
1968 struct be_cmd_resp_cntl_attribs *resp;
1969 struct be_sge *sge;
1970 int status;
1971 int payload_len = max(sizeof(*req), sizeof(*resp));
1972 struct mgmt_controller_attrib *attribs;
1973 struct be_dma_mem attribs_cmd;
1974
1975 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
1976 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
1977 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
1978 &attribs_cmd.dma);
1979 if (!attribs_cmd.va) {
1980 dev_err(&adapter->pdev->dev,
1981 "Memory allocation failure\n");
1982 return -ENOMEM;
1983 }
1984
1985 if (mutex_lock_interruptible(&adapter->mbox_lock))
1986 return -1;
1987
1988 wrb = wrb_from_mbox(adapter);
1989 if (!wrb) {
1990 status = -EBUSY;
1991 goto err;
1992 }
1993 req = attribs_cmd.va;
1994 sge = nonembedded_sgl(wrb);
1995
1996 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1997 OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
1998 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1999 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
2000 sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
2001 sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
2002 sge->len = cpu_to_le32(attribs_cmd.size);
2003
2004 status = be_mbox_notify_wait(adapter);
2005 if (!status) {
2006 attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
2007 sizeof(struct be_cmd_resp_hdr));
2008 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2009 }
2010
2011err:
2012 mutex_unlock(&adapter->mbox_lock);
2013 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2014 attribs_cmd.dma);
2015 return status;
2016}
2017
2018/* Uses mbox */
2019int be_cmd_check_native_mode(struct be_adapter *adapter)
2020{
2021 struct be_mcc_wrb *wrb;
2022 struct be_cmd_req_set_func_cap *req;
2023 int status;
2024
2025 if (mutex_lock_interruptible(&adapter->mbox_lock))
2026 return -1;
2027
2028 wrb = wrb_from_mbox(adapter);
2029 if (!wrb) {
2030 status = -EBUSY;
2031 goto err;
2032 }
2033
2034 req = embedded_payload(wrb);
2035
2036 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2037 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
2038
2039 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2040 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
2041
2042 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2043 CAPABILITY_BE3_NATIVE_ERX_API);
2044 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2045
2046 status = be_mbox_notify_wait(adapter);
2047 if (!status) {
2048 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2049 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2050 CAPABILITY_BE3_NATIVE_ERX_API;
2051 }
2052err:
2053 mutex_unlock(&adapter->mbox_lock);
2054 return status;
2055}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 83d15c8a9fa3..4f254cfaabe2 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18/* 18/*
@@ -88,6 +88,7 @@ struct be_mcc_compl {
88#define ASYNC_EVENT_CODE_GRP_5 0x5 88#define ASYNC_EVENT_CODE_GRP_5 0x5
89#define ASYNC_EVENT_QOS_SPEED 0x1 89#define ASYNC_EVENT_QOS_SPEED 0x1
90#define ASYNC_EVENT_COS_PRIORITY 0x2 90#define ASYNC_EVENT_COS_PRIORITY 0x2
91#define ASYNC_EVENT_PVID_STATE 0x3
91struct be_async_event_trailer { 92struct be_async_event_trailer {
92 u32 code; 93 u32 code;
93}; 94};
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
134 struct be_async_event_trailer trailer; 135 struct be_async_event_trailer trailer;
135} __packed; 136} __packed;
136 137
138/* When the event code of an async trailer is GRP5 and event type is
139 * PVID state, the mcc_compl must be interpreted as follows
140 */
141struct be_async_event_grp5_pvid_state {
142 u8 enabled;
143 u8 rsvd0;
144 u16 tag;
145 u32 event_tag;
146 u32 rsvd1;
147 struct be_async_event_trailer trailer;
148} __packed;
149
137struct be_mcc_mailbox { 150struct be_mcc_mailbox {
138 struct be_mcc_wrb wrb; 151 struct be_mcc_wrb wrb;
139 struct be_mcc_compl compl; 152 struct be_mcc_compl compl;
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
156#define OPCODE_COMMON_SET_QOS 28 169#define OPCODE_COMMON_SET_QOS 28
157#define OPCODE_COMMON_MCC_CREATE_EXT 90 170#define OPCODE_COMMON_MCC_CREATE_EXT 90
158#define OPCODE_COMMON_SEEPROM_READ 30 171#define OPCODE_COMMON_SEEPROM_READ 30
172#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
159#define OPCODE_COMMON_NTWK_RX_FILTER 34 173#define OPCODE_COMMON_NTWK_RX_FILTER 34
160#define OPCODE_COMMON_GET_FW_VERSION 35 174#define OPCODE_COMMON_GET_FW_VERSION 35
161#define OPCODE_COMMON_SET_FLOW_CONTROL 36 175#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -176,6 +190,8 @@ struct be_mcc_mailbox {
176#define OPCODE_COMMON_GET_BEACON_STATE 70 190#define OPCODE_COMMON_GET_BEACON_STATE 70
177#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 191#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
178#define OPCODE_COMMON_GET_PHY_DETAILS 102 192#define OPCODE_COMMON_GET_PHY_DETAILS 102
193#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
194#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
179 195
180#define OPCODE_ETH_RSS_CONFIG 1 196#define OPCODE_ETH_RSS_CONFIG 1
181#define OPCODE_ETH_ACPI_CONFIG 2 197#define OPCODE_ETH_ACPI_CONFIG 2
@@ -415,7 +431,7 @@ struct be_cmd_resp_mcc_create {
415/* Pseudo amap definition in which each bit of the actual structure is defined 431/* Pseudo amap definition in which each bit of the actual structure is defined
416 * as a byte: used to calculate offset/shift/mask of each field */ 432 * as a byte: used to calculate offset/shift/mask of each field */
417struct amap_tx_context { 433struct amap_tx_context {
418 u8 rsvd0[16]; /* dword 0 */ 434 u8 if_id[16]; /* dword 0 */
419 u8 tx_ring_size[4]; /* dword 0 */ 435 u8 tx_ring_size[4]; /* dword 0 */
420 u8 rsvd1[26]; /* dword 0 */ 436 u8 rsvd1[26]; /* dword 0 */
421 u8 pci_func_id[8]; /* dword 1 */ 437 u8 pci_func_id[8]; /* dword 1 */
@@ -503,7 +519,8 @@ enum be_if_flags {
503 BE_IF_FLAGS_VLAN = 0x100, 519 BE_IF_FLAGS_VLAN = 0x100,
504 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, 520 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
505 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, 521 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
506 BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800 522 BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
523 BE_IF_FLAGS_MULTICAST = 0x1000
507}; 524};
508 525
509/* An RX interface is an object with one or more MAC addresses and 526/* An RX interface is an object with one or more MAC addresses and
@@ -619,7 +636,10 @@ struct be_rxf_stats {
619 u32 rx_drops_invalid_ring; /* dword 145*/ 636 u32 rx_drops_invalid_ring; /* dword 145*/
620 u32 forwarded_packets; /* dword 146*/ 637 u32 forwarded_packets; /* dword 146*/
621 u32 rx_drops_mtu; /* dword 147*/ 638 u32 rx_drops_mtu; /* dword 147*/
622 u32 rsvd0[15]; 639 u32 rsvd0[7];
640 u32 port0_jabber_events;
641 u32 port1_jabber_events;
642 u32 rsvd1[6];
623}; 643};
624 644
625struct be_erx_stats { 645struct be_erx_stats {
@@ -630,11 +650,16 @@ struct be_erx_stats {
630 u32 debug_pmem_pbuf_dealloc; /* dword 47*/ 650 u32 debug_pmem_pbuf_dealloc; /* dword 47*/
631}; 651};
632 652
653struct be_pmem_stats {
654 u32 eth_red_drops;
655 u32 rsvd[4];
656};
657
633struct be_hw_stats { 658struct be_hw_stats {
634 struct be_rxf_stats rxf; 659 struct be_rxf_stats rxf;
635 u32 rsvd[48]; 660 u32 rsvd[48];
636 struct be_erx_stats erx; 661 struct be_erx_stats erx;
637 u32 rsvd1[6]; 662 struct be_pmem_stats pmem;
638}; 663};
639 664
640struct be_cmd_req_get_stats { 665struct be_cmd_req_get_stats {
@@ -647,6 +672,20 @@ struct be_cmd_resp_get_stats {
647 struct be_hw_stats hw_stats; 672 struct be_hw_stats hw_stats;
648}; 673};
649 674
675struct be_cmd_req_get_cntl_addnl_attribs {
676 struct be_cmd_req_hdr hdr;
677 u8 rsvd[8];
678};
679
680struct be_cmd_resp_get_cntl_addnl_attribs {
681 struct be_cmd_resp_hdr hdr;
682 u16 ipl_file_number;
683 u8 ipl_file_version;
684 u8 rsvd0;
685 u8 on_die_temperature; /* in degrees centigrade*/
686 u8 rsvd1[3];
687};
688
650struct be_cmd_req_vlan_config { 689struct be_cmd_req_vlan_config {
651 struct be_cmd_req_hdr hdr; 690 struct be_cmd_req_hdr hdr;
652 u8 interface_id; 691 u8 interface_id;
@@ -994,17 +1033,47 @@ struct be_cmd_resp_set_qos {
994 u32 rsvd; 1033 u32 rsvd;
995}; 1034};
996 1035
1036/*********************** Controller Attributes ***********************/
1037struct be_cmd_req_cntl_attribs {
1038 struct be_cmd_req_hdr hdr;
1039};
1040
1041struct be_cmd_resp_cntl_attribs {
1042 struct be_cmd_resp_hdr hdr;
1043 struct mgmt_controller_attrib attribs;
1044};
1045
1046/*********************** Set driver function ***********************/
1047#define CAPABILITY_SW_TIMESTAMPS 2
1048#define CAPABILITY_BE3_NATIVE_ERX_API 4
1049
1050struct be_cmd_req_set_func_cap {
1051 struct be_cmd_req_hdr hdr;
1052 u32 valid_cap_flags;
1053 u32 cap_flags;
1054 u8 rsvd[212];
1055};
1056
1057struct be_cmd_resp_set_func_cap {
1058 struct be_cmd_resp_hdr hdr;
1059 u32 valid_cap_flags;
1060 u32 cap_flags;
1061 u8 rsvd[212];
1062};
1063
997extern int be_pci_fnum_get(struct be_adapter *adapter); 1064extern int be_pci_fnum_get(struct be_adapter *adapter);
998extern int be_cmd_POST(struct be_adapter *adapter); 1065extern int be_cmd_POST(struct be_adapter *adapter);
999extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1066extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1000 u8 type, bool permanent, u32 if_handle); 1067 u8 type, bool permanent, u32 if_handle);
1001extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 1068extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1002 u32 if_id, u32 *pmac_id); 1069 u32 if_id, u32 *pmac_id, u32 domain);
1003extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); 1070extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
1071 u32 pmac_id, u32 domain);
1004extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, 1072extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
1005 u32 en_flags, u8 *mac, bool pmac_invalid, 1073 u32 en_flags, u8 *mac, bool pmac_invalid,
1006 u32 *if_handle, u32 *pmac_id, u32 domain); 1074 u32 *if_handle, u32 *pmac_id, u32 domain);
1007extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); 1075extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
1076 u32 domain);
1008extern int be_cmd_eq_create(struct be_adapter *adapter, 1077extern int be_cmd_eq_create(struct be_adapter *adapter,
1009 struct be_queue_info *eq, int eq_delay); 1078 struct be_queue_info *eq, int eq_delay);
1010extern int be_cmd_cq_create(struct be_adapter *adapter, 1079extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1076,4 +1145,7 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
1076 struct be_dma_mem *cmd); 1145 struct be_dma_mem *cmd);
1077extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 1146extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1078extern void be_detect_dump_ue(struct be_adapter *adapter); 1147extern void be_detect_dump_ue(struct be_adapter *adapter);
1148extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
1149extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1150extern int be_cmd_check_native_mode(struct be_adapter *adapter);
1079 1151
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index b4be0271efe0..aac248fbd18b 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
26 int offset; 26 int offset;
27}; 27};
28 28
29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT}; 29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
30 PMEMSTAT, DRVSTAT};
30#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ 31#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 offsetof(_struct, field) 32 offsetof(_struct, field)
32#define NETSTAT_INFO(field) #field, NETSTAT,\ 33#define NETSTAT_INFO(field) #field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
43 field) 44 field)
44#define ERXSTAT_INFO(field) #field, ERXSTAT,\ 45#define ERXSTAT_INFO(field) #field, ERXSTAT,\
45 FIELDINFO(struct be_erx_stats, field) 46 FIELDINFO(struct be_erx_stats, field)
47#define PMEMSTAT_INFO(field) #field, PMEMSTAT,\
48 FIELDINFO(struct be_pmem_stats, field)
49#define DRVSTAT_INFO(field) #field, DRVSTAT,\
50 FIELDINFO(struct be_drv_stats, \
51 field)
46 52
47static const struct be_ethtool_stat et_stats[] = { 53static const struct be_ethtool_stat et_stats[] = {
48 {NETSTAT_INFO(rx_packets)}, 54 {NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
99 {MISCSTAT_INFO(rx_drops_too_many_frags)}, 105 {MISCSTAT_INFO(rx_drops_too_many_frags)},
100 {MISCSTAT_INFO(rx_drops_invalid_ring)}, 106 {MISCSTAT_INFO(rx_drops_invalid_ring)},
101 {MISCSTAT_INFO(forwarded_packets)}, 107 {MISCSTAT_INFO(forwarded_packets)},
102 {MISCSTAT_INFO(rx_drops_mtu)} 108 {MISCSTAT_INFO(rx_drops_mtu)},
109 {MISCSTAT_INFO(port0_jabber_events)},
110 {MISCSTAT_INFO(port1_jabber_events)},
111 {PMEMSTAT_INFO(eth_red_drops)},
112 {DRVSTAT_INFO(be_on_die_temperature)}
103}; 113};
104#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) 114#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
105 115
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
121 "MAC Loopback test", 131 "MAC Loopback test",
122 "PHY Loopback test", 132 "PHY Loopback test",
123 "External Loopback test", 133 "External Loopback test",
124 "DDR DMA test" 134 "DDR DMA test",
125 "Link test" 135 "Link test"
126}; 136};
127 137
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
276 case MISCSTAT: 286 case MISCSTAT:
277 p = &hw_stats->rxf; 287 p = &hw_stats->rxf;
278 break; 288 break;
289 case PMEMSTAT:
290 p = &hw_stats->pmem;
291 break;
292 case DRVSTAT:
293 p = &adapter->drv_stats;
294 break;
279 } 295 }
280 296
281 p = (u8 *)p + et_stats[i].offset; 297 p = (u8 *)p + et_stats[i].offset;
@@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
376 } 392 }
377 393
378 phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info); 394 phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
379 phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size, 395 phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
380 &phy_cmd.dma); 396 phy_cmd.size, &phy_cmd.dma,
397 GFP_KERNEL);
381 if (!phy_cmd.va) { 398 if (!phy_cmd.va) {
382 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 399 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
383 return -ENOMEM; 400 return -ENOMEM;
@@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
416 adapter->port_type = ecmd->port; 433 adapter->port_type = ecmd->port;
417 adapter->transceiver = ecmd->transceiver; 434 adapter->transceiver = ecmd->transceiver;
418 adapter->autoneg = ecmd->autoneg; 435 adapter->autoneg = ecmd->autoneg;
419 pci_free_consistent(adapter->pdev, phy_cmd.size, 436 dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
420 phy_cmd.va, phy_cmd.dma); 437 phy_cmd.dma);
421 } else { 438 } else {
422 ecmd->speed = adapter->link_speed; 439 ecmd->speed = adapter->link_speed;
423 ecmd->port = adapter->port_type; 440 ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
496 int status; 513 int status;
497 u32 cur; 514 u32 cur;
498 515
499 be_cmd_get_beacon_state(adapter, adapter->port_num, &cur); 516 be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
500 517
501 if (cur == BEACON_STATE_ENABLED) 518 if (cur == BEACON_STATE_ENABLED)
502 return 0; 519 return 0;
@@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
504 if (data < 2) 521 if (data < 2)
505 data = 2; 522 data = 2;
506 523
507 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0, 524 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
508 BEACON_STATE_ENABLED); 525 BEACON_STATE_ENABLED);
509 set_current_state(TASK_INTERRUPTIBLE); 526 set_current_state(TASK_INTERRUPTIBLE);
510 schedule_timeout(data*HZ); 527 schedule_timeout(data*HZ);
511 528
512 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0, 529 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
513 BEACON_STATE_DISABLED); 530 BEACON_STATE_DISABLED);
514 531
515 return status; 532 return status;
516} 533}
517 534
535static bool
536be_is_wol_supported(struct be_adapter *adapter)
537{
538 if (!be_physfn(adapter))
539 return false;
540 else
541 return true;
542}
543
518static void 544static void
519be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 545be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
520{ 546{
521 struct be_adapter *adapter = netdev_priv(netdev); 547 struct be_adapter *adapter = netdev_priv(netdev);
522 548
523 wol->supported = WAKE_MAGIC; 549 if (be_is_wol_supported(adapter))
550 wol->supported = WAKE_MAGIC;
551
524 if (adapter->wol) 552 if (adapter->wol)
525 wol->wolopts = WAKE_MAGIC; 553 wol->wolopts = WAKE_MAGIC;
526 else 554 else
@@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
536 if (wol->wolopts & ~WAKE_MAGIC) 564 if (wol->wolopts & ~WAKE_MAGIC)
537 return -EINVAL; 565 return -EINVAL;
538 566
539 if (wol->wolopts & WAKE_MAGIC) 567 if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
540 adapter->wol = true; 568 adapter->wol = true;
541 else 569 else
542 adapter->wol = false; 570 adapter->wol = false;
@@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
554 }; 582 };
555 583
556 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 584 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
557 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, 585 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
558 &ddrdma_cmd.dma); 586 &ddrdma_cmd.dma, GFP_KERNEL);
559 if (!ddrdma_cmd.va) { 587 if (!ddrdma_cmd.va) {
560 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 588 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
561 return -ENOMEM; 589 return -ENOMEM;
@@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
569 } 597 }
570 598
571err: 599err:
572 pci_free_consistent(adapter->pdev, ddrdma_cmd.size, 600 dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
573 ddrdma_cmd.va, ddrdma_cmd.dma); 601 ddrdma_cmd.dma);
574 return ret; 602 return ret;
575} 603}
576 604
577static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, 605static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
578 u64 *status) 606 u64 *status)
579{ 607{
580 be_cmd_set_loopback(adapter, adapter->port_num, 608 be_cmd_set_loopback(adapter, adapter->hba_port_num,
581 loopback_type, 1); 609 loopback_type, 1);
582 *status = be_cmd_loopback_test(adapter, adapter->port_num, 610 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
583 loopback_type, 1500, 611 loopback_type, 1500,
584 2, 0xabc); 612 2, 0xabc);
585 be_cmd_set_loopback(adapter, adapter->port_num, 613 be_cmd_set_loopback(adapter, adapter->hba_port_num,
586 BE_NO_LOOPBACK, 1); 614 BE_NO_LOOPBACK, 1);
587 return *status; 615 return *status;
588} 616}
@@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
621 &qos_link_speed) != 0) { 649 &qos_link_speed) != 0) {
622 test->flags |= ETH_TEST_FL_FAILED; 650 test->flags |= ETH_TEST_FL_FAILED;
623 data[4] = -1; 651 data[4] = -1;
624 } else if (mac_speed) { 652 } else if (!mac_speed) {
653 test->flags |= ETH_TEST_FL_FAILED;
625 data[4] = 1; 654 data[4] = 1;
626 } 655 }
627} 656}
@@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
662 691
663 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 692 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
664 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 693 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
665 eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size, 694 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
666 &eeprom_cmd.dma); 695 &eeprom_cmd.dma, GFP_KERNEL);
667 696
668 if (!eeprom_cmd.va) { 697 if (!eeprom_cmd.va) {
669 dev_err(&adapter->pdev->dev, 698 dev_err(&adapter->pdev->dev,
@@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
677 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va; 706 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
678 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len); 707 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
679 } 708 }
680 pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va, 709 dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
681 eeprom_cmd.dma); 710 eeprom_cmd.dma);
682 711
683 return status; 712 return status;
684} 713}
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 4096d9778234..d4344a06090b 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18/********* Mailbox door bell *************/ 18/********* Mailbox door bell *************/
@@ -44,6 +44,18 @@
44#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */ 44#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */ 45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46 46
47
48/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
49#define SLIPORT_STATUS_OFFSET 0x404
50#define SLIPORT_CONTROL_OFFSET 0x408
51
52#define SLIPORT_STATUS_ERR_MASK 0x80000000
53#define SLIPORT_STATUS_RN_MASK 0x01000000
54#define SLIPORT_STATUS_RDY_MASK 0x00800000
55
56
57#define SLI_PORT_CONTROL_IP_MASK 0x08000000
58
47/********* Memory BAR register ************/ 59/********* Memory BAR register ************/
48#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 60#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
49/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 61/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -289,10 +301,10 @@ struct be_eth_rx_d {
289 301
290/* RX Compl Queue Descriptor */ 302/* RX Compl Queue Descriptor */
291 303
292/* Pseudo amap definition for eth_rx_compl in which each bit of the 304/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
293 * actual structure is defined as a byte: used to calculate 305 * each bit of the actual structure is defined as a byte: used to calculate
294 * offset/shift/mask of each field */ 306 * offset/shift/mask of each field */
295struct amap_eth_rx_compl { 307struct amap_eth_rx_compl_v0 {
296 u8 vlan_tag[16]; /* dword 0 */ 308 u8 vlan_tag[16]; /* dword 0 */
297 u8 pktsize[14]; /* dword 0 */ 309 u8 pktsize[14]; /* dword 0 */
298 u8 port; /* dword 0 */ 310 u8 port; /* dword 0 */
@@ -323,10 +335,92 @@ struct amap_eth_rx_compl {
323 u8 rsshash[32]; /* dword 3 */ 335 u8 rsshash[32]; /* dword 3 */
324} __packed; 336} __packed;
325 337
338/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
339 * each bit of the actual structure is defined as a byte: used to calculate
340 * offset/shift/mask of each field */
341struct amap_eth_rx_compl_v1 {
342 u8 vlan_tag[16]; /* dword 0 */
343 u8 pktsize[14]; /* dword 0 */
344 u8 vtp; /* dword 0 */
345 u8 ip_opt; /* dword 0 */
346 u8 err; /* dword 1 */
347 u8 rsshp; /* dword 1 */
348 u8 ipf; /* dword 1 */
349 u8 tcpf; /* dword 1 */
350 u8 udpf; /* dword 1 */
351 u8 ipcksm; /* dword 1 */
352 u8 l4_cksm; /* dword 1 */
353 u8 ip_version; /* dword 1 */
354 u8 macdst[7]; /* dword 1 */
355 u8 rsvd0; /* dword 1 */
356 u8 fragndx[10]; /* dword 1 */
357 u8 ct[2]; /* dword 1 */
358 u8 sw; /* dword 1 */
359 u8 numfrags[3]; /* dword 1 */
360 u8 rss_flush; /* dword 2 */
361 u8 cast_enc[2]; /* dword 2 */
362 u8 vtm; /* dword 2 */
363 u8 rss_bank; /* dword 2 */
364 u8 port[2]; /* dword 2 */
365 u8 vntagp; /* dword 2 */
366 u8 header_len[8]; /* dword 2 */
367 u8 header_split[2]; /* dword 2 */
368 u8 rsvd1[13]; /* dword 2 */
369 u8 valid; /* dword 2 */
370 u8 rsshash[32]; /* dword 3 */
371} __packed;
372
326struct be_eth_rx_compl { 373struct be_eth_rx_compl {
327 u32 dw[4]; 374 u32 dw[4];
328}; 375};
329 376
377struct mgmt_hba_attribs {
378 u8 flashrom_version_string[32];
379 u8 manufacturer_name[32];
380 u32 supported_modes;
381 u32 rsvd0[3];
382 u8 ncsi_ver_string[12];
383 u32 default_extended_timeout;
384 u8 controller_model_number[32];
385 u8 controller_description[64];
386 u8 controller_serial_number[32];
387 u8 ip_version_string[32];
388 u8 firmware_version_string[32];
389 u8 bios_version_string[32];
390 u8 redboot_version_string[32];
391 u8 driver_version_string[32];
392 u8 fw_on_flash_version_string[32];
393 u32 functionalities_supported;
394 u16 max_cdblength;
395 u8 asic_revision;
396 u8 generational_guid[16];
397 u8 hba_port_count;
398 u16 default_link_down_timeout;
399 u8 iscsi_ver_min_max;
400 u8 multifunction_device;
401 u8 cache_valid;
402 u8 hba_status;
403 u8 max_domains_supported;
404 u8 phy_port;
405 u32 firmware_post_status;
406 u32 hba_mtu[8];
407 u32 rsvd1[4];
408};
409
410struct mgmt_controller_attrib {
411 struct mgmt_hba_attribs hba_attribs;
412 u16 pci_vendor_id;
413 u16 pci_device_id;
414 u16 pci_sub_vendor_id;
415 u16 pci_sub_system_id;
416 u8 pci_bus_number;
417 u8 pci_device_number;
418 u8 pci_function_number;
419 u8 interface_type;
420 u64 unique_identifier;
421 u32 rsvd0[5];
422};
423
330struct controller_id { 424struct controller_id {
331 u32 vendor; 425 u32 vendor;
332 u32 device; 426 u32 device;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 28a32a6c8bf1..a71163f1e34b 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
@@ -25,9 +25,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation"); 25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27 27
28static unsigned int rx_frag_size = 2048; 28static ushort rx_frag_size = 2048;
29static unsigned int num_vfs; 29static unsigned int num_vfs;
30module_param(rx_frag_size, uint, S_IRUGO); 30module_param(rx_frag_size, ushort, S_IRUGO);
31module_param(num_vfs, uint, S_IRUGO); 31module_param(num_vfs, uint, S_IRUGO);
32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); 32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125{ 125{
126 struct be_dma_mem *mem = &q->dma_mem; 126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va) 127 if (mem->va)
128 pci_free_consistent(adapter->pdev, mem->size, 128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->va, mem->dma); 129 mem->dma);
130} 130}
131 131
132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, 132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 q->len = len; 138 q->len = len;
139 q->entry_size = entry_size; 139 q->entry_size = entry_size;
140 mem->size = len * entry_size; 140 mem->size = len * entry_size;
141 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma); 141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
142 if (!mem->va) 143 if (!mem->va)
143 return -1; 144 return -1;
144 memset(mem->va, 0, mem->size); 145 memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
235 if (!be_physfn(adapter)) 236 if (!be_physfn(adapter))
236 goto netdev_addr; 237 goto netdev_addr;
237 238
238 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); 239 status = be_cmd_pmac_del(adapter, adapter->if_handle,
240 adapter->pmac_id, 0);
239 if (status) 241 if (status)
240 return status; 242 return status;
241 243
242 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, 244 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
243 adapter->if_handle, &adapter->pmac_id); 245 adapter->if_handle, &adapter->pmac_id, 0);
244netdev_addr: 246netdev_addr:
245 if (!status) 247 if (!status)
246 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 248 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -484,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
484 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); 486 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
485} 487}
486 488
487static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb, 489static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
488 bool unmap_single) 490 bool unmap_single)
489{ 491{
490 dma_addr_t dma; 492 dma_addr_t dma;
@@ -494,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
494 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; 496 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
495 if (wrb->frag_len) { 497 if (wrb->frag_len) {
496 if (unmap_single) 498 if (unmap_single)
497 pci_unmap_single(pdev, dma, wrb->frag_len, 499 dma_unmap_single(dev, dma, wrb->frag_len,
498 PCI_DMA_TODEVICE); 500 DMA_TO_DEVICE);
499 else 501 else
500 pci_unmap_page(pdev, dma, wrb->frag_len, 502 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
501 PCI_DMA_TODEVICE);
502 } 503 }
503} 504}
504 505
@@ -507,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
507{ 508{
508 dma_addr_t busaddr; 509 dma_addr_t busaddr;
509 int i, copied = 0; 510 int i, copied = 0;
510 struct pci_dev *pdev = adapter->pdev; 511 struct device *dev = &adapter->pdev->dev;
511 struct sk_buff *first_skb = skb; 512 struct sk_buff *first_skb = skb;
512 struct be_queue_info *txq = &adapter->tx_obj.q; 513 struct be_queue_info *txq = &adapter->tx_obj.q;
513 struct be_eth_wrb *wrb; 514 struct be_eth_wrb *wrb;
@@ -521,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
521 522
522 if (skb->len > skb->data_len) { 523 if (skb->len > skb->data_len) {
523 int len = skb_headlen(skb); 524 int len = skb_headlen(skb);
524 busaddr = pci_map_single(pdev, skb->data, len, 525 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
525 PCI_DMA_TODEVICE); 526 if (dma_mapping_error(dev, busaddr))
526 if (pci_dma_mapping_error(pdev, busaddr))
527 goto dma_err; 527 goto dma_err;
528 map_single = true; 528 map_single = true;
529 wrb = queue_head_node(txq); 529 wrb = queue_head_node(txq);
@@ -536,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
536 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 536 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
537 struct skb_frag_struct *frag = 537 struct skb_frag_struct *frag =
538 &skb_shinfo(skb)->frags[i]; 538 &skb_shinfo(skb)->frags[i];
539 busaddr = pci_map_page(pdev, frag->page, 539 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
540 frag->page_offset, 540 frag->size, DMA_TO_DEVICE);
541 frag->size, PCI_DMA_TODEVICE); 541 if (dma_mapping_error(dev, busaddr))
542 if (pci_dma_mapping_error(pdev, busaddr))
543 goto dma_err; 542 goto dma_err;
544 wrb = queue_head_node(txq); 543 wrb = queue_head_node(txq);
545 wrb_fill(wrb, busaddr, frag->size); 544 wrb_fill(wrb, busaddr, frag->size);
@@ -563,7 +562,7 @@ dma_err:
563 txq->head = map_head; 562 txq->head = map_head;
564 while (copied) { 563 while (copied) {
565 wrb = queue_head_node(txq); 564 wrb = queue_head_node(txq);
566 unmap_tx_frag(pdev, wrb, map_single); 565 unmap_tx_frag(dev, wrb, map_single);
567 map_single = false; 566 map_single = false;
568 copied -= wrb->frag_len; 567 copied -= wrb->frag_len;
569 queue_head_inc(txq); 568 queue_head_inc(txq);
@@ -743,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
743 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) 742 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
744 status = be_cmd_pmac_del(adapter, 743 status = be_cmd_pmac_del(adapter,
745 adapter->vf_cfg[vf].vf_if_handle, 744 adapter->vf_cfg[vf].vf_if_handle,
746 adapter->vf_cfg[vf].vf_pmac_id); 745 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
747 746
748 status = be_cmd_pmac_add(adapter, mac, 747 status = be_cmd_pmac_add(adapter, mac,
749 adapter->vf_cfg[vf].vf_if_handle, 748 adapter->vf_cfg[vf].vf_if_handle,
750 &adapter->vf_cfg[vf].vf_pmac_id); 749 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
751 750
752 if (status) 751 if (status)
753 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", 752 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -822,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
822 rate = 10000; 821 rate = 10000;
823 822
824 adapter->vf_cfg[vf].vf_tx_rate = rate; 823 adapter->vf_cfg[vf].vf_tx_rate = rate;
825 status = be_cmd_set_qos(adapter, rate / 10, vf); 824 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
826 825
827 if (status) 826 if (status)
828 dev_info(&adapter->pdev->dev, 827 dev_info(&adapter->pdev->dev,
@@ -852,28 +851,26 @@ static void be_rx_rate_update(struct be_rx_obj *rxo)
852} 851}
853 852
854static void be_rx_stats_update(struct be_rx_obj *rxo, 853static void be_rx_stats_update(struct be_rx_obj *rxo,
855 u32 pktsize, u16 numfrags, u8 pkt_type) 854 struct be_rx_compl_info *rxcp)
856{ 855{
857 struct be_rx_stats *stats = &rxo->stats; 856 struct be_rx_stats *stats = &rxo->stats;
858 857
859 stats->rx_compl++; 858 stats->rx_compl++;
860 stats->rx_frags += numfrags; 859 stats->rx_frags += rxcp->num_rcvd;
861 stats->rx_bytes += pktsize; 860 stats->rx_bytes += rxcp->pkt_size;
862 stats->rx_pkts++; 861 stats->rx_pkts++;
863 if (pkt_type == BE_MULTICAST_PACKET) 862 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
864 stats->rx_mcast_pkts++; 863 stats->rx_mcast_pkts++;
864 if (rxcp->err)
865 stats->rxcp_err++;
865} 866}
866 867
867static inline bool csum_passed(struct be_eth_rx_compl *rxcp) 868static inline bool csum_passed(struct be_rx_compl_info *rxcp)
868{ 869{
869 u8 l4_cksm, ipv6, ipcksm; 870 /* L4 checksum is not reliable for non TCP/UDP packets.
870 871 * Also ignore ipcksm for ipv6 pkts */
871 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); 872 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
872 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp); 873 (rxcp->ip_csum || rxcp->ipv6);
873 ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
874
875 /* Ignore ipcksm for ipv6 pkts */
876 return l4_cksm && (ipcksm || ipv6);
877} 874}
878 875
879static struct be_rx_page_info * 876static struct be_rx_page_info *
@@ -888,8 +885,9 @@ get_rx_page_info(struct be_adapter *adapter,
888 BUG_ON(!rx_page_info->page); 885 BUG_ON(!rx_page_info->page);
889 886
890 if (rx_page_info->last_page_user) { 887 if (rx_page_info->last_page_user) {
891 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus), 888 dma_unmap_page(&adapter->pdev->dev,
892 adapter->big_page_size, PCI_DMA_FROMDEVICE); 889 dma_unmap_addr(rx_page_info, bus),
890 adapter->big_page_size, DMA_FROM_DEVICE);
893 rx_page_info->last_page_user = false; 891 rx_page_info->last_page_user = false;
894 } 892 }
895 893
@@ -900,26 +898,17 @@ get_rx_page_info(struct be_adapter *adapter,
900/* Throwaway the data in the Rx completion */ 898/* Throwaway the data in the Rx completion */
901static void be_rx_compl_discard(struct be_adapter *adapter, 899static void be_rx_compl_discard(struct be_adapter *adapter,
902 struct be_rx_obj *rxo, 900 struct be_rx_obj *rxo,
903 struct be_eth_rx_compl *rxcp) 901 struct be_rx_compl_info *rxcp)
904{ 902{
905 struct be_queue_info *rxq = &rxo->q; 903 struct be_queue_info *rxq = &rxo->q;
906 struct be_rx_page_info *page_info; 904 struct be_rx_page_info *page_info;
907 u16 rxq_idx, i, num_rcvd; 905 u16 i, num_rcvd = rxcp->num_rcvd;
908
909 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
910 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
911 906
912 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */ 907 for (i = 0; i < num_rcvd; i++) {
913 if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) { 908 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
914 909 put_page(page_info->page);
915 rxo->last_frag_index = rxq_idx; 910 memset(page_info, 0, sizeof(*page_info));
916 911 index_inc(&rxcp->rxq_idx, rxq->len);
917 for (i = 0; i < num_rcvd; i++) {
918 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
919 put_page(page_info->page);
920 memset(page_info, 0, sizeof(*page_info));
921 index_inc(&rxq_idx, rxq->len);
922 }
923 } 912 }
924} 913}
925 914
@@ -928,30 +917,23 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
928 * indicated by rxcp. 917 * indicated by rxcp.
929 */ 918 */
930static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, 919static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
931 struct sk_buff *skb, struct be_eth_rx_compl *rxcp, 920 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
932 u16 num_rcvd)
933{ 921{
934 struct be_queue_info *rxq = &rxo->q; 922 struct be_queue_info *rxq = &rxo->q;
935 struct be_rx_page_info *page_info; 923 struct be_rx_page_info *page_info;
936 u16 rxq_idx, i, j; 924 u16 i, j;
937 u32 pktsize, hdr_len, curr_frag_len, size; 925 u16 hdr_len, curr_frag_len, remaining;
938 u8 *start; 926 u8 *start;
939 u8 pkt_type;
940
941 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
942 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
943 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
944
945 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
946 927
928 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
947 start = page_address(page_info->page) + page_info->page_offset; 929 start = page_address(page_info->page) + page_info->page_offset;
948 prefetch(start); 930 prefetch(start);
949 931
950 /* Copy data in the first descriptor of this completion */ 932 /* Copy data in the first descriptor of this completion */
951 curr_frag_len = min(pktsize, rx_frag_size); 933 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
952 934
953 /* Copy the header portion into skb_data */ 935 /* Copy the header portion into skb_data */
954 hdr_len = min((u32)BE_HDR_LEN, curr_frag_len); 936 hdr_len = min(BE_HDR_LEN, curr_frag_len);
955 memcpy(skb->data, start, hdr_len); 937 memcpy(skb->data, start, hdr_len);
956 skb->len = curr_frag_len; 938 skb->len = curr_frag_len;
957 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ 939 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
@@ -970,19 +952,17 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
970 } 952 }
971 page_info->page = NULL; 953 page_info->page = NULL;
972 954
973 if (pktsize <= rx_frag_size) { 955 if (rxcp->pkt_size <= rx_frag_size) {
974 BUG_ON(num_rcvd != 1); 956 BUG_ON(rxcp->num_rcvd != 1);
975 goto done; 957 return;
976 } 958 }
977 959
978 /* More frags present for this completion */ 960 /* More frags present for this completion */
979 size = pktsize; 961 index_inc(&rxcp->rxq_idx, rxq->len);
980 for (i = 1, j = 0; i < num_rcvd; i++) { 962 remaining = rxcp->pkt_size - curr_frag_len;
981 size -= curr_frag_len; 963 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
982 index_inc(&rxq_idx, rxq->len); 964 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
983 page_info = get_rx_page_info(adapter, rxo, rxq_idx); 965 curr_frag_len = min(remaining, rx_frag_size);
984
985 curr_frag_len = min(size, rx_frag_size);
986 966
987 /* Coalesce all frags from the same physical page in one slot */ 967 /* Coalesce all frags from the same physical page in one slot */
988 if (page_info->page_offset == 0) { 968 if (page_info->page_offset == 0) {
@@ -1001,25 +981,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1001 skb->len += curr_frag_len; 981 skb->len += curr_frag_len;
1002 skb->data_len += curr_frag_len; 982 skb->data_len += curr_frag_len;
1003 983
984 remaining -= curr_frag_len;
985 index_inc(&rxcp->rxq_idx, rxq->len);
1004 page_info->page = NULL; 986 page_info->page = NULL;
1005 } 987 }
1006 BUG_ON(j > MAX_SKB_FRAGS); 988 BUG_ON(j > MAX_SKB_FRAGS);
1007
1008done:
1009 be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
1010} 989}
1011 990
1012/* Process the RX completion indicated by rxcp when GRO is disabled */ 991/* Process the RX completion indicated by rxcp when GRO is disabled */
1013static void be_rx_compl_process(struct be_adapter *adapter, 992static void be_rx_compl_process(struct be_adapter *adapter,
1014 struct be_rx_obj *rxo, 993 struct be_rx_obj *rxo,
1015 struct be_eth_rx_compl *rxcp) 994 struct be_rx_compl_info *rxcp)
1016{ 995{
1017 struct sk_buff *skb; 996 struct sk_buff *skb;
1018 u32 vlanf, vid;
1019 u16 num_rcvd;
1020 u8 vtm;
1021
1022 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1023 997
1024 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); 998 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1025 if (unlikely(!skb)) { 999 if (unlikely(!skb)) {
@@ -1029,7 +1003,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1029 return; 1003 return;
1030 } 1004 }
1031 1005
1032 skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd); 1006 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1033 1007
1034 if (likely(adapter->rx_csum && csum_passed(rxcp))) 1008 if (likely(adapter->rx_csum && csum_passed(rxcp)))
1035 skb->ip_summed = CHECKSUM_UNNECESSARY; 1009 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1039,23 +1013,12 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1039 skb->truesize = skb->len + sizeof(struct sk_buff); 1013 skb->truesize = skb->len + sizeof(struct sk_buff);
1040 skb->protocol = eth_type_trans(skb, adapter->netdev); 1014 skb->protocol = eth_type_trans(skb, adapter->netdev);
1041 1015
1042 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 1016 if (unlikely(rxcp->vlanf)) {
1043 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1044
1045 /* vlanf could be wrongly set in some cards.
1046 * ignore if vtm is not set */
1047 if ((adapter->function_mode & 0x400) && !vtm)
1048 vlanf = 0;
1049
1050 if (unlikely(vlanf)) {
1051 if (!adapter->vlan_grp || adapter->vlans_added == 0) { 1017 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1052 kfree_skb(skb); 1018 kfree_skb(skb);
1053 return; 1019 return;
1054 } 1020 }
1055 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1021 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
1056 if (!lancer_chip(adapter))
1057 vid = swab16(vid);
1058 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1059 } else { 1022 } else {
1060 netif_receive_skb(skb); 1023 netif_receive_skb(skb);
1061 } 1024 }
@@ -1064,28 +1027,14 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1064/* Process the RX completion indicated by rxcp when GRO is enabled */ 1027/* Process the RX completion indicated by rxcp when GRO is enabled */
1065static void be_rx_compl_process_gro(struct be_adapter *adapter, 1028static void be_rx_compl_process_gro(struct be_adapter *adapter,
1066 struct be_rx_obj *rxo, 1029 struct be_rx_obj *rxo,
1067 struct be_eth_rx_compl *rxcp) 1030 struct be_rx_compl_info *rxcp)
1068{ 1031{
1069 struct be_rx_page_info *page_info; 1032 struct be_rx_page_info *page_info;
1070 struct sk_buff *skb = NULL; 1033 struct sk_buff *skb = NULL;
1071 struct be_queue_info *rxq = &rxo->q; 1034 struct be_queue_info *rxq = &rxo->q;
1072 struct be_eq_obj *eq_obj = &rxo->rx_eq; 1035 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1073 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 1036 u16 remaining, curr_frag_len;
1074 u16 i, rxq_idx = 0, vid, j; 1037 u16 i, j;
1075 u8 vtm;
1076 u8 pkt_type;
1077
1078 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1079 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1080 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1081 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1082 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1083 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1084
1085 /* vlanf could be wrongly set in some cards.
1086 * ignore if vtm is not set */
1087 if ((adapter->function_mode & 0x400) && !vtm)
1088 vlanf = 0;
1089 1038
1090 skb = napi_get_frags(&eq_obj->napi); 1039 skb = napi_get_frags(&eq_obj->napi);
1091 if (!skb) { 1040 if (!skb) {
@@ -1093,9 +1042,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1093 return; 1042 return;
1094 } 1043 }
1095 1044
1096 remaining = pkt_size; 1045 remaining = rxcp->pkt_size;
1097 for (i = 0, j = -1; i < num_rcvd; i++) { 1046 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1098 page_info = get_rx_page_info(adapter, rxo, rxq_idx); 1047 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1099 1048
1100 curr_frag_len = min(remaining, rx_frag_size); 1049 curr_frag_len = min(remaining, rx_frag_size);
1101 1050
@@ -1113,70 +1062,125 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1113 skb_shinfo(skb)->frags[j].size += curr_frag_len; 1062 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1114 1063
1115 remaining -= curr_frag_len; 1064 remaining -= curr_frag_len;
1116 index_inc(&rxq_idx, rxq->len); 1065 index_inc(&rxcp->rxq_idx, rxq->len);
1117 memset(page_info, 0, sizeof(*page_info)); 1066 memset(page_info, 0, sizeof(*page_info));
1118 } 1067 }
1119 BUG_ON(j > MAX_SKB_FRAGS); 1068 BUG_ON(j > MAX_SKB_FRAGS);
1120 1069
1121 skb_shinfo(skb)->nr_frags = j + 1; 1070 skb_shinfo(skb)->nr_frags = j + 1;
1122 skb->len = pkt_size; 1071 skb->len = rxcp->pkt_size;
1123 skb->data_len = pkt_size; 1072 skb->data_len = rxcp->pkt_size;
1124 skb->truesize += pkt_size; 1073 skb->truesize += rxcp->pkt_size;
1125 skb->ip_summed = CHECKSUM_UNNECESSARY; 1074 skb->ip_summed = CHECKSUM_UNNECESSARY;
1126 1075
1127 if (likely(!vlanf)) { 1076 if (likely(!rxcp->vlanf))
1128 napi_gro_frags(&eq_obj->napi); 1077 napi_gro_frags(&eq_obj->napi);
1129 } else { 1078 else
1130 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1079 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
1131 if (!lancer_chip(adapter)) 1080}
1132 vid = swab16(vid); 1081
1082static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1083 struct be_eth_rx_compl *compl,
1084 struct be_rx_compl_info *rxcp)
1085{
1086 rxcp->pkt_size =
1087 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1088 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1089 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1090 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1091 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1092 rxcp->ip_csum =
1093 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1094 rxcp->l4_csum =
1095 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1096 rxcp->ipv6 =
1097 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1098 rxcp->rxq_idx =
1099 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1100 rxcp->num_rcvd =
1101 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1102 rxcp->pkt_type =
1103 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1104 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
1105 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
1106}
1107
1108static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1109 struct be_eth_rx_compl *compl,
1110 struct be_rx_compl_info *rxcp)
1111{
1112 rxcp->pkt_size =
1113 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1114 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1115 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1116 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1117 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1118 rxcp->ip_csum =
1119 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1120 rxcp->l4_csum =
1121 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1122 rxcp->ipv6 =
1123 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1124 rxcp->rxq_idx =
1125 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1126 rxcp->num_rcvd =
1127 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1128 rxcp->pkt_type =
1129 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1130 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
1131 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
1132}
1133
1134static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1135{
1136 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1137 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1138 struct be_adapter *adapter = rxo->adapter;
1133 1139
1134 if (!adapter->vlan_grp || adapter->vlans_added == 0) 1140 /* For checking the valid bit it is Ok to use either definition as the
1135 return; 1141 * valid bit is at the same position in both v0 and v1 Rx compl */
1142 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1143 return NULL;
1136 1144
1137 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 1145 rmb();
1138 } 1146 be_dws_le_to_cpu(compl, sizeof(*compl));
1139 1147
1140 be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type); 1148 if (adapter->be3_native)
1141} 1149 be_parse_rx_compl_v1(adapter, compl, rxcp);
1150 else
1151 be_parse_rx_compl_v0(adapter, compl, rxcp);
1142 1152
1143static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo) 1153 /* vlanf could be wrongly set in some cards. ignore if vtm is not set */
1144{ 1154 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1145 struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq); 1155 rxcp->vlanf = 0;
1146 1156
1147 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) 1157 if (!lancer_chip(adapter))
1148 return NULL; 1158 rxcp->vid = swab16(rxcp->vid);
1149 1159
1150 rmb(); 1160 if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
1151 be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); 1161 rxcp->vlanf = 0;
1162
1163 /* As the compl has been parsed, reset it; we wont touch it again */
1164 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1152 1165
1153 queue_tail_inc(&rxo->cq); 1166 queue_tail_inc(&rxo->cq);
1154 return rxcp; 1167 return rxcp;
1155} 1168}
1156 1169
1157/* To reset the valid bit, we need to reset the whole word as 1170static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1158 * when walking the queue the valid entries are little-endian
1159 * and invalid entries are host endian
1160 */
1161static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1162{ 1171{
1163 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1164}
1165
1166static inline struct page *be_alloc_pages(u32 size)
1167{
1168 gfp_t alloc_flags = GFP_ATOMIC;
1169 u32 order = get_order(size); 1172 u32 order = get_order(size);
1173
1170 if (order > 0) 1174 if (order > 0)
1171 alloc_flags |= __GFP_COMP; 1175 gfp |= __GFP_COMP;
1172 return alloc_pages(alloc_flags, order); 1176 return alloc_pages(gfp, order);
1173} 1177}
1174 1178
1175/* 1179/*
1176 * Allocate a page, split it to fragments of size rx_frag_size and post as 1180 * Allocate a page, split it to fragments of size rx_frag_size and post as
1177 * receive buffers to BE 1181 * receive buffers to BE
1178 */ 1182 */
1179static void be_post_rx_frags(struct be_rx_obj *rxo) 1183static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1180{ 1184{
1181 struct be_adapter *adapter = rxo->adapter; 1185 struct be_adapter *adapter = rxo->adapter;
1182 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; 1186 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1190,14 +1194,14 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
1190 page_info = &rxo->page_info_tbl[rxq->head]; 1194 page_info = &rxo->page_info_tbl[rxq->head];
1191 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { 1195 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1192 if (!pagep) { 1196 if (!pagep) {
1193 pagep = be_alloc_pages(adapter->big_page_size); 1197 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1194 if (unlikely(!pagep)) { 1198 if (unlikely(!pagep)) {
1195 rxo->stats.rx_post_fail++; 1199 rxo->stats.rx_post_fail++;
1196 break; 1200 break;
1197 } 1201 }
1198 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, 1202 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1199 adapter->big_page_size, 1203 0, adapter->big_page_size,
1200 PCI_DMA_FROMDEVICE); 1204 DMA_FROM_DEVICE);
1201 page_info->page_offset = 0; 1205 page_info->page_offset = 0;
1202 } else { 1206 } else {
1203 get_page(pagep); 1207 get_page(pagep);
@@ -1270,8 +1274,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1270 do { 1274 do {
1271 cur_index = txq->tail; 1275 cur_index = txq->tail;
1272 wrb = queue_tail_node(txq); 1276 wrb = queue_tail_node(txq);
1273 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr && 1277 unmap_tx_frag(&adapter->pdev->dev, wrb,
1274 skb_headlen(sent_skb))); 1278 (unmap_skb_hdr && skb_headlen(sent_skb)));
1275 unmap_skb_hdr = false; 1279 unmap_skb_hdr = false;
1276 1280
1277 num_wrbs++; 1281 num_wrbs++;
@@ -1339,13 +1343,12 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1339 struct be_rx_page_info *page_info; 1343 struct be_rx_page_info *page_info;
1340 struct be_queue_info *rxq = &rxo->q; 1344 struct be_queue_info *rxq = &rxo->q;
1341 struct be_queue_info *rx_cq = &rxo->cq; 1345 struct be_queue_info *rx_cq = &rxo->cq;
1342 struct be_eth_rx_compl *rxcp; 1346 struct be_rx_compl_info *rxcp;
1343 u16 tail; 1347 u16 tail;
1344 1348
1345 /* First cleanup pending rx completions */ 1349 /* First cleanup pending rx completions */
1346 while ((rxcp = be_rx_compl_get(rxo)) != NULL) { 1350 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1347 be_rx_compl_discard(adapter, rxo, rxcp); 1351 be_rx_compl_discard(adapter, rxo, rxcp);
1348 be_rx_compl_reset(rxcp);
1349 be_cq_notify(adapter, rx_cq->id, false, 1); 1352 be_cq_notify(adapter, rx_cq->id, false, 1);
1350 } 1353 }
1351 1354
@@ -1573,9 +1576,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1573 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1576 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1574 for_all_rx_queues(adapter, rxo, i) { 1577 for_all_rx_queues(adapter, rxo, i) {
1575 rxo->adapter = adapter; 1578 rxo->adapter = adapter;
1576 /* Init last_frag_index so that the frag index in the first
1577 * completion will never match */
1578 rxo->last_frag_index = 0xffff;
1579 rxo->rx_eq.max_eqd = BE_MAX_EQD; 1579 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1580 rxo->rx_eq.enable_aic = true; 1580 rxo->rx_eq.enable_aic = true;
1581 1581
@@ -1697,15 +1697,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1697 return IRQ_HANDLED; 1697 return IRQ_HANDLED;
1698} 1698}
1699 1699
1700static inline bool do_gro(struct be_rx_obj *rxo, 1700static inline bool do_gro(struct be_rx_compl_info *rxcp)
1701 struct be_eth_rx_compl *rxcp, u8 err)
1702{ 1701{
1703 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); 1702 return (rxcp->tcpf && !rxcp->err) ? true : false;
1704
1705 if (err)
1706 rxo->stats.rxcp_err++;
1707
1708 return (tcp_frame && !err) ? true : false;
1709} 1703}
1710 1704
1711static int be_poll_rx(struct napi_struct *napi, int budget) 1705static int be_poll_rx(struct napi_struct *napi, int budget)
@@ -1714,10 +1708,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1714 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq); 1708 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1715 struct be_adapter *adapter = rxo->adapter; 1709 struct be_adapter *adapter = rxo->adapter;
1716 struct be_queue_info *rx_cq = &rxo->cq; 1710 struct be_queue_info *rx_cq = &rxo->cq;
1717 struct be_eth_rx_compl *rxcp; 1711 struct be_rx_compl_info *rxcp;
1718 u32 work_done; 1712 u32 work_done;
1719 u16 frag_index, num_rcvd;
1720 u8 err;
1721 1713
1722 rxo->stats.rx_polls++; 1714 rxo->stats.rx_polls++;
1723 for (work_done = 0; work_done < budget; work_done++) { 1715 for (work_done = 0; work_done < budget; work_done++) {
@@ -1725,29 +1717,19 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1725 if (!rxcp) 1717 if (!rxcp)
1726 break; 1718 break;
1727 1719
1728 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); 1720 /* Ignore flush completions */
1729 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, 1721 if (rxcp->num_rcvd) {
1730 rxcp); 1722 if (do_gro(rxcp))
1731 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1732 rxcp);
1733
1734 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
1735 if (likely(frag_index != rxo->last_frag_index &&
1736 num_rcvd != 0)) {
1737 rxo->last_frag_index = frag_index;
1738
1739 if (do_gro(rxo, rxcp, err))
1740 be_rx_compl_process_gro(adapter, rxo, rxcp); 1723 be_rx_compl_process_gro(adapter, rxo, rxcp);
1741 else 1724 else
1742 be_rx_compl_process(adapter, rxo, rxcp); 1725 be_rx_compl_process(adapter, rxo, rxcp);
1743 } 1726 }
1744 1727 be_rx_stats_update(rxo, rxcp);
1745 be_rx_compl_reset(rxcp);
1746 } 1728 }
1747 1729
1748 /* Refill the queue */ 1730 /* Refill the queue */
1749 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) 1731 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1750 be_post_rx_frags(rxo); 1732 be_post_rx_frags(rxo, GFP_ATOMIC);
1751 1733
1752 /* All consumed */ 1734 /* All consumed */
1753 if (work_done < budget) { 1735 if (work_done < budget) {
@@ -1827,6 +1809,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
1827 1809
1828 if (ue_status_lo || ue_status_hi) { 1810 if (ue_status_lo || ue_status_hi) {
1829 adapter->ue_detected = true; 1811 adapter->ue_detected = true;
1812 adapter->eeh_err = true;
1830 dev_err(&adapter->pdev->dev, "UE Detected!!\n"); 1813 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1831 } 1814 }
1832 1815
@@ -1865,10 +1848,14 @@ static void be_worker(struct work_struct *work)
1865 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 1848 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1866 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); 1849 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1867 } 1850 }
1851
1852 if (!adapter->ue_detected && !lancer_chip(adapter))
1853 be_detect_dump_ue(adapter);
1854
1868 goto reschedule; 1855 goto reschedule;
1869 } 1856 }
1870 1857
1871 if (!adapter->stats_ioctl_sent) 1858 if (!adapter->stats_cmd_sent)
1872 be_cmd_get_stats(adapter, &adapter->stats_cmd); 1859 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1873 1860
1874 be_tx_rate_update(adapter); 1861 be_tx_rate_update(adapter);
@@ -1879,7 +1866,7 @@ static void be_worker(struct work_struct *work)
1879 1866
1880 if (rxo->rx_post_starved) { 1867 if (rxo->rx_post_starved) {
1881 rxo->rx_post_starved = false; 1868 rxo->rx_post_starved = false;
1882 be_post_rx_frags(rxo); 1869 be_post_rx_frags(rxo, GFP_KERNEL);
1883 } 1870 }
1884 } 1871 }
1885 if (!adapter->ue_detected && !lancer_chip(adapter)) 1872 if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2083,13 +2070,24 @@ static int be_close(struct net_device *netdev)
2083 2070
2084 be_async_mcc_disable(adapter); 2071 be_async_mcc_disable(adapter);
2085 2072
2086 netif_stop_queue(netdev);
2087 netif_carrier_off(netdev); 2073 netif_carrier_off(netdev);
2088 adapter->link_up = false; 2074 adapter->link_up = false;
2089 2075
2090 if (!lancer_chip(adapter)) 2076 if (!lancer_chip(adapter))
2091 be_intr_set(adapter, false); 2077 be_intr_set(adapter, false);
2092 2078
2079 for_all_rx_queues(adapter, rxo, i)
2080 napi_disable(&rxo->rx_eq.napi);
2081
2082 napi_disable(&tx_eq->napi);
2083
2084 if (lancer_chip(adapter)) {
2085 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2086 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2087 for_all_rx_queues(adapter, rxo, i)
2088 be_cq_notify(adapter, rxo->cq.id, false, 0);
2089 }
2090
2093 if (adapter->msix_enabled) { 2091 if (adapter->msix_enabled) {
2094 vec = be_msix_vec_get(adapter, tx_eq); 2092 vec = be_msix_vec_get(adapter, tx_eq);
2095 synchronize_irq(vec); 2093 synchronize_irq(vec);
@@ -2103,11 +2101,6 @@ static int be_close(struct net_device *netdev)
2103 } 2101 }
2104 be_irq_unregister(adapter); 2102 be_irq_unregister(adapter);
2105 2103
2106 for_all_rx_queues(adapter, rxo, i)
2107 napi_disable(&rxo->rx_eq.napi);
2108
2109 napi_disable(&tx_eq->napi);
2110
2111 /* Wait for all pending tx completions to arrive so that 2104 /* Wait for all pending tx completions to arrive so that
2112 * all tx skbs are freed. 2105 * all tx skbs are freed.
2113 */ 2106 */
@@ -2127,7 +2120,7 @@ static int be_open(struct net_device *netdev)
2127 u16 link_speed; 2120 u16 link_speed;
2128 2121
2129 for_all_rx_queues(adapter, rxo, i) { 2122 for_all_rx_queues(adapter, rxo, i) {
2130 be_post_rx_frags(rxo); 2123 be_post_rx_frags(rxo, GFP_KERNEL);
2131 napi_enable(&rxo->rx_eq.napi); 2124 napi_enable(&rxo->rx_eq.napi);
2132 } 2125 }
2133 napi_enable(&tx_eq->napi); 2126 napi_enable(&tx_eq->napi);
@@ -2179,7 +2172,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2179 memset(mac, 0, ETH_ALEN); 2172 memset(mac, 0, ETH_ALEN);
2180 2173
2181 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 2174 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2182 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2175 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2176 GFP_KERNEL);
2183 if (cmd.va == NULL) 2177 if (cmd.va == NULL)
2184 return -1; 2178 return -1;
2185 memset(cmd.va, 0, cmd.size); 2179 memset(cmd.va, 0, cmd.size);
@@ -2190,8 +2184,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2190 if (status) { 2184 if (status) {
2191 dev_err(&adapter->pdev->dev, 2185 dev_err(&adapter->pdev->dev,
2192 "Could not enable Wake-on-lan\n"); 2186 "Could not enable Wake-on-lan\n");
2193 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, 2187 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2194 cmd.dma); 2188 cmd.dma);
2195 return status; 2189 return status;
2196 } 2190 }
2197 status = be_cmd_enable_magic_wol(adapter, 2191 status = be_cmd_enable_magic_wol(adapter,
@@ -2204,7 +2198,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2204 pci_enable_wake(adapter->pdev, PCI_D3cold, 0); 2198 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2205 } 2199 }
2206 2200
2207 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2201 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2208 return status; 2202 return status;
2209} 2203}
2210 2204
@@ -2225,7 +2219,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2225 for (vf = 0; vf < num_vfs; vf++) { 2219 for (vf = 0; vf < num_vfs; vf++) {
2226 status = be_cmd_pmac_add(adapter, mac, 2220 status = be_cmd_pmac_add(adapter, mac,
2227 adapter->vf_cfg[vf].vf_if_handle, 2221 adapter->vf_cfg[vf].vf_if_handle,
2228 &adapter->vf_cfg[vf].vf_pmac_id); 2222 &adapter->vf_cfg[vf].vf_pmac_id,
2223 vf + 1);
2229 if (status) 2224 if (status)
2230 dev_err(&adapter->pdev->dev, 2225 dev_err(&adapter->pdev->dev,
2231 "Mac address add failed for VF %d\n", vf); 2226 "Mac address add failed for VF %d\n", vf);
@@ -2245,7 +2240,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2245 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) 2240 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2246 be_cmd_pmac_del(adapter, 2241 be_cmd_pmac_del(adapter,
2247 adapter->vf_cfg[vf].vf_if_handle, 2242 adapter->vf_cfg[vf].vf_if_handle,
2248 adapter->vf_cfg[vf].vf_pmac_id); 2243 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2249 } 2244 }
2250} 2245}
2251 2246
@@ -2256,7 +2251,9 @@ static int be_setup(struct be_adapter *adapter)
2256 int status; 2251 int status;
2257 u8 mac[ETH_ALEN]; 2252 u8 mac[ETH_ALEN];
2258 2253
2259 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; 2254 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2255 BE_IF_FLAGS_BROADCAST |
2256 BE_IF_FLAGS_MULTICAST;
2260 2257
2261 if (be_physfn(adapter)) { 2258 if (be_physfn(adapter)) {
2262 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS | 2259 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2277,22 +2274,26 @@ static int be_setup(struct be_adapter *adapter)
2277 goto do_none; 2274 goto do_none;
2278 2275
2279 if (be_physfn(adapter)) { 2276 if (be_physfn(adapter)) {
2280 while (vf < num_vfs) { 2277 if (adapter->sriov_enabled) {
2281 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED 2278 while (vf < num_vfs) {
2282 | BE_IF_FLAGS_BROADCAST; 2279 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2283 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2280 BE_IF_FLAGS_BROADCAST;
2284 mac, true, 2281 status = be_cmd_if_create(adapter, cap_flags,
2282 en_flags, mac, true,
2285 &adapter->vf_cfg[vf].vf_if_handle, 2283 &adapter->vf_cfg[vf].vf_if_handle,
2286 NULL, vf+1); 2284 NULL, vf+1);
2287 if (status) { 2285 if (status) {
2288 dev_err(&adapter->pdev->dev, 2286 dev_err(&adapter->pdev->dev,
2289 "Interface Create failed for VF %d\n", vf); 2287 "Interface Create failed for VF %d\n",
2290 goto if_destroy; 2288 vf);
2289 goto if_destroy;
2290 }
2291 adapter->vf_cfg[vf].vf_pmac_id =
2292 BE_INVALID_PMAC_ID;
2293 vf++;
2291 } 2294 }
2292 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2293 vf++;
2294 } 2295 }
2295 } else if (!be_physfn(adapter)) { 2296 } else {
2296 status = be_cmd_mac_addr_query(adapter, mac, 2297 status = be_cmd_mac_addr_query(adapter, mac,
2297 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); 2298 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2298 if (!status) { 2299 if (!status) {
@@ -2313,44 +2314,46 @@ static int be_setup(struct be_adapter *adapter)
2313 if (status != 0) 2314 if (status != 0)
2314 goto rx_qs_destroy; 2315 goto rx_qs_destroy;
2315 2316
2316 if (be_physfn(adapter)) {
2317 status = be_vf_eth_addr_config(adapter);
2318 if (status)
2319 goto mcc_q_destroy;
2320 }
2321
2322 adapter->link_speed = -1; 2317 adapter->link_speed = -1;
2323 2318
2324 return 0; 2319 return 0;
2325 2320
2326mcc_q_destroy:
2327 if (be_physfn(adapter))
2328 be_vf_eth_addr_rem(adapter);
2329 be_mcc_queues_destroy(adapter); 2321 be_mcc_queues_destroy(adapter);
2330rx_qs_destroy: 2322rx_qs_destroy:
2331 be_rx_queues_destroy(adapter); 2323 be_rx_queues_destroy(adapter);
2332tx_qs_destroy: 2324tx_qs_destroy:
2333 be_tx_queues_destroy(adapter); 2325 be_tx_queues_destroy(adapter);
2334if_destroy: 2326if_destroy:
2335 for (vf = 0; vf < num_vfs; vf++) 2327 if (be_physfn(adapter) && adapter->sriov_enabled)
2336 if (adapter->vf_cfg[vf].vf_if_handle) 2328 for (vf = 0; vf < num_vfs; vf++)
2337 be_cmd_if_destroy(adapter, 2329 if (adapter->vf_cfg[vf].vf_if_handle)
2338 adapter->vf_cfg[vf].vf_if_handle); 2330 be_cmd_if_destroy(adapter,
2339 be_cmd_if_destroy(adapter, adapter->if_handle); 2331 adapter->vf_cfg[vf].vf_if_handle,
2332 vf + 1);
2333 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2340do_none: 2334do_none:
2341 return status; 2335 return status;
2342} 2336}
2343 2337
2344static int be_clear(struct be_adapter *adapter) 2338static int be_clear(struct be_adapter *adapter)
2345{ 2339{
2346 if (be_physfn(adapter)) 2340 int vf;
2341
2342 if (be_physfn(adapter) && adapter->sriov_enabled)
2347 be_vf_eth_addr_rem(adapter); 2343 be_vf_eth_addr_rem(adapter);
2348 2344
2349 be_mcc_queues_destroy(adapter); 2345 be_mcc_queues_destroy(adapter);
2350 be_rx_queues_destroy(adapter); 2346 be_rx_queues_destroy(adapter);
2351 be_tx_queues_destroy(adapter); 2347 be_tx_queues_destroy(adapter);
2352 2348
2353 be_cmd_if_destroy(adapter, adapter->if_handle); 2349 if (be_physfn(adapter) && adapter->sriov_enabled)
2350 for (vf = 0; vf < num_vfs; vf++)
2351 if (adapter->vf_cfg[vf].vf_if_handle)
2352 be_cmd_if_destroy(adapter,
2353 adapter->vf_cfg[vf].vf_if_handle,
2354 vf + 1);
2355
2356 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2354 2357
2355 /* tell fw we're done with firing cmds */ 2358 /* tell fw we're done with firing cmds */
2356 be_cmd_fw_clean(adapter); 2359 be_cmd_fw_clean(adapter);
@@ -2453,8 +2456,8 @@ static int be_flash_data(struct be_adapter *adapter,
2453 continue; 2456 continue;
2454 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && 2457 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2455 (!be_flash_redboot(adapter, fw->data, 2458 (!be_flash_redboot(adapter, fw->data,
2456 pflashcomp[i].offset, pflashcomp[i].size, 2459 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2457 filehdr_size))) 2460 (num_of_images * sizeof(struct image_hdr)))))
2458 continue; 2461 continue;
2459 p = fw->data; 2462 p = fw->data;
2460 p += filehdr_size + pflashcomp[i].offset 2463 p += filehdr_size + pflashcomp[i].offset
@@ -2528,8 +2531,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2528 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); 2531 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2529 2532
2530 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; 2533 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2531 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size, 2534 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2532 &flash_cmd.dma); 2535 &flash_cmd.dma, GFP_KERNEL);
2533 if (!flash_cmd.va) { 2536 if (!flash_cmd.va) {
2534 status = -ENOMEM; 2537 status = -ENOMEM;
2535 dev_err(&adapter->pdev->dev, 2538 dev_err(&adapter->pdev->dev,
@@ -2558,8 +2561,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2558 status = -1; 2561 status = -1;
2559 } 2562 }
2560 2563
2561 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va, 2564 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2562 flash_cmd.dma); 2565 flash_cmd.dma);
2563 if (status) { 2566 if (status) {
2564 dev_err(&adapter->pdev->dev, "Firmware load error\n"); 2567 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2565 goto fw_exit; 2568 goto fw_exit;
@@ -2700,13 +2703,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
2700 be_unmap_pci_bars(adapter); 2703 be_unmap_pci_bars(adapter);
2701 2704
2702 if (mem->va) 2705 if (mem->va)
2703 pci_free_consistent(adapter->pdev, mem->size, 2706 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2704 mem->va, mem->dma); 2707 mem->dma);
2705 2708
2706 mem = &adapter->mc_cmd_mem; 2709 mem = &adapter->mc_cmd_mem;
2707 if (mem->va) 2710 if (mem->va)
2708 pci_free_consistent(adapter->pdev, mem->size, 2711 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2709 mem->va, mem->dma); 2712 mem->dma);
2710} 2713}
2711 2714
2712static int be_ctrl_init(struct be_adapter *adapter) 2715static int be_ctrl_init(struct be_adapter *adapter)
@@ -2721,8 +2724,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
2721 goto done; 2724 goto done;
2722 2725
2723 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 2726 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2724 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, 2727 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2725 mbox_mem_alloc->size, &mbox_mem_alloc->dma); 2728 mbox_mem_alloc->size,
2729 &mbox_mem_alloc->dma,
2730 GFP_KERNEL);
2726 if (!mbox_mem_alloc->va) { 2731 if (!mbox_mem_alloc->va) {
2727 status = -ENOMEM; 2732 status = -ENOMEM;
2728 goto unmap_pci_bars; 2733 goto unmap_pci_bars;
@@ -2734,8 +2739,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
2734 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 2739 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2735 2740
2736 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config); 2741 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2737 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size, 2742 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2738 &mc_cmd_mem->dma); 2743 mc_cmd_mem->size, &mc_cmd_mem->dma,
2744 GFP_KERNEL);
2739 if (mc_cmd_mem->va == NULL) { 2745 if (mc_cmd_mem->va == NULL) {
2740 status = -ENOMEM; 2746 status = -ENOMEM;
2741 goto free_mbox; 2747 goto free_mbox;
@@ -2751,8 +2757,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
2751 return 0; 2757 return 0;
2752 2758
2753free_mbox: 2759free_mbox:
2754 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size, 2760 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2755 mbox_mem_alloc->va, mbox_mem_alloc->dma); 2761 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2756 2762
2757unmap_pci_bars: 2763unmap_pci_bars:
2758 be_unmap_pci_bars(adapter); 2764 be_unmap_pci_bars(adapter);
@@ -2766,8 +2772,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
2766 struct be_dma_mem *cmd = &adapter->stats_cmd; 2772 struct be_dma_mem *cmd = &adapter->stats_cmd;
2767 2773
2768 if (cmd->va) 2774 if (cmd->va)
2769 pci_free_consistent(adapter->pdev, cmd->size, 2775 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2770 cmd->va, cmd->dma); 2776 cmd->va, cmd->dma);
2771} 2777}
2772 2778
2773static int be_stats_init(struct be_adapter *adapter) 2779static int be_stats_init(struct be_adapter *adapter)
@@ -2775,7 +2781,8 @@ static int be_stats_init(struct be_adapter *adapter)
2775 struct be_dma_mem *cmd = &adapter->stats_cmd; 2781 struct be_dma_mem *cmd = &adapter->stats_cmd;
2776 2782
2777 cmd->size = sizeof(struct be_cmd_req_get_stats); 2783 cmd->size = sizeof(struct be_cmd_req_get_stats);
2778 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); 2784 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2785 GFP_KERNEL);
2779 if (cmd->va == NULL) 2786 if (cmd->va == NULL)
2780 return -1; 2787 return -1;
2781 memset(cmd->va, 0, cmd->size); 2788 memset(cmd->va, 0, cmd->size);
@@ -2845,6 +2852,11 @@ static int be_get_config(struct be_adapter *adapter)
2845 else 2852 else
2846 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; 2853 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2847 2854
2855 status = be_cmd_get_cntl_attributes(adapter);
2856 if (status)
2857 return status;
2858
2859 be_cmd_check_native_mode(adapter);
2848 return 0; 2860 return 0;
2849} 2861}
2850 2862
@@ -2886,6 +2898,54 @@ static int be_dev_family_check(struct be_adapter *adapter)
2886 return 0; 2898 return 0;
2887} 2899}
2888 2900
2901static int lancer_wait_ready(struct be_adapter *adapter)
2902{
2903#define SLIPORT_READY_TIMEOUT 500
2904 u32 sliport_status;
2905 int status = 0, i;
2906
2907 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2908 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2909 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2910 break;
2911
2912 msleep(20);
2913 }
2914
2915 if (i == SLIPORT_READY_TIMEOUT)
2916 status = -1;
2917
2918 return status;
2919}
2920
2921static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2922{
2923 int status;
2924 u32 sliport_status, err, reset_needed;
2925 status = lancer_wait_ready(adapter);
2926 if (!status) {
2927 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2928 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2929 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2930 if (err && reset_needed) {
2931 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2932 adapter->db + SLIPORT_CONTROL_OFFSET);
2933
2934 /* check adapter has corrected the error */
2935 status = lancer_wait_ready(adapter);
2936 sliport_status = ioread32(adapter->db +
2937 SLIPORT_STATUS_OFFSET);
2938 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2939 SLIPORT_STATUS_RN_MASK);
2940 if (status || sliport_status)
2941 status = -1;
2942 } else if (err || reset_needed) {
2943 status = -1;
2944 }
2945 }
2946 return status;
2947}
2948
2889static int __devinit be_probe(struct pci_dev *pdev, 2949static int __devinit be_probe(struct pci_dev *pdev,
2890 const struct pci_device_id *pdev_id) 2950 const struct pci_device_id *pdev_id)
2891{ 2951{
@@ -2918,11 +2978,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
2918 adapter->netdev = netdev; 2978 adapter->netdev = netdev;
2919 SET_NETDEV_DEV(netdev, &pdev->dev); 2979 SET_NETDEV_DEV(netdev, &pdev->dev);
2920 2980
2921 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2981 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2922 if (!status) { 2982 if (!status) {
2923 netdev->features |= NETIF_F_HIGHDMA; 2983 netdev->features |= NETIF_F_HIGHDMA;
2924 } else { 2984 } else {
2925 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2985 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2926 if (status) { 2986 if (status) {
2927 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); 2987 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2928 goto free_netdev; 2988 goto free_netdev;
@@ -2935,6 +2995,14 @@ static int __devinit be_probe(struct pci_dev *pdev,
2935 if (status) 2995 if (status)
2936 goto free_netdev; 2996 goto free_netdev;
2937 2997
2998 if (lancer_chip(adapter)) {
2999 status = lancer_test_and_set_rdy_state(adapter);
3000 if (status) {
3001 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3002 goto free_netdev;
3003 }
3004 }
3005
2938 /* sync up with fw's ready state */ 3006 /* sync up with fw's ready state */
2939 if (be_physfn(adapter)) { 3007 if (be_physfn(adapter)) {
2940 status = be_cmd_POST(adapter); 3008 status = be_cmd_POST(adapter);
@@ -2947,11 +3015,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
2947 if (status) 3015 if (status)
2948 goto ctrl_clean; 3016 goto ctrl_clean;
2949 3017
2950 if (be_physfn(adapter)) { 3018 status = be_cmd_reset_function(adapter);
2951 status = be_cmd_reset_function(adapter); 3019 if (status)
2952 if (status) 3020 goto ctrl_clean;
2953 goto ctrl_clean;
2954 }
2955 3021
2956 status = be_stats_init(adapter); 3022 status = be_stats_init(adapter);
2957 if (status) 3023 if (status)
@@ -2975,10 +3041,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
2975 goto unsetup; 3041 goto unsetup;
2976 netif_carrier_off(netdev); 3042 netif_carrier_off(netdev);
2977 3043
3044 if (be_physfn(adapter) && adapter->sriov_enabled) {
3045 status = be_vf_eth_addr_config(adapter);
3046 if (status)
3047 goto unreg_netdev;
3048 }
3049
2978 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); 3050 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2979 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); 3051 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2980 return 0; 3052 return 0;
2981 3053
3054unreg_netdev:
3055 unregister_netdev(netdev);
2982unsetup: 3056unsetup:
2983 be_clear(adapter); 3057 be_clear(adapter);
2984msix_disable: 3058msix_disable:
@@ -3005,6 +3079,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3005 struct be_adapter *adapter = pci_get_drvdata(pdev); 3079 struct be_adapter *adapter = pci_get_drvdata(pdev);
3006 struct net_device *netdev = adapter->netdev; 3080 struct net_device *netdev = adapter->netdev;
3007 3081
3082 cancel_delayed_work_sync(&adapter->work);
3008 if (adapter->wol) 3083 if (adapter->wol)
3009 be_setup_wol(adapter, true); 3084 be_setup_wol(adapter, true);
3010 3085
@@ -3017,6 +3092,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3017 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc); 3092 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3018 be_clear(adapter); 3093 be_clear(adapter);
3019 3094
3095 be_msix_disable(adapter);
3020 pci_save_state(pdev); 3096 pci_save_state(pdev);
3021 pci_disable_device(pdev); 3097 pci_disable_device(pdev);
3022 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3098 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3038,6 +3114,7 @@ static int be_resume(struct pci_dev *pdev)
3038 pci_set_power_state(pdev, 0); 3114 pci_set_power_state(pdev, 0);
3039 pci_restore_state(pdev); 3115 pci_restore_state(pdev);
3040 3116
3117 be_msix_enable(adapter);
3041 /* tell fw we're ready to fire cmds */ 3118 /* tell fw we're ready to fire cmds */
3042 status = be_cmd_fw_init(adapter); 3119 status = be_cmd_fw_init(adapter);
3043 if (status) 3120 if (status)
@@ -3053,6 +3130,8 @@ static int be_resume(struct pci_dev *pdev)
3053 3130
3054 if (adapter->wol) 3131 if (adapter->wol)
3055 be_setup_wol(adapter, false); 3132 be_setup_wol(adapter, false);
3133
3134 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3056 return 0; 3135 return 0;
3057} 3136}
3058 3137
@@ -3064,6 +3143,9 @@ static void be_shutdown(struct pci_dev *pdev)
3064 struct be_adapter *adapter = pci_get_drvdata(pdev); 3143 struct be_adapter *adapter = pci_get_drvdata(pdev);
3065 struct net_device *netdev = adapter->netdev; 3144 struct net_device *netdev = adapter->netdev;
3066 3145
3146 if (netif_running(netdev))
3147 cancel_delayed_work_sync(&adapter->work);
3148
3067 netif_device_detach(netdev); 3149 netif_device_detach(netdev);
3068 3150
3069 be_cmd_reset_function(adapter); 3151 be_cmd_reset_function(adapter);
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad912656fe4..9f356d5d0f33 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
126 } 126 }
127 unmap_array[unmap_cons].skb = NULL; 127 unmap_array[unmap_cons].skb = NULL;
128 128
129 pci_unmap_single(bnad->pcidev, 129 dma_unmap_single(&bnad->pcidev->dev,
130 pci_unmap_addr(&unmap_array[unmap_cons], 130 dma_unmap_addr(&unmap_array[unmap_cons],
131 dma_addr), skb_headlen(skb), 131 dma_addr), skb_headlen(skb),
132 PCI_DMA_TODEVICE); 132 DMA_TO_DEVICE);
133 133
134 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 134 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
135 if (++unmap_cons >= unmap_q->q_depth) 135 if (++unmap_cons >= unmap_q->q_depth)
136 break; 136 break;
137 137
138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
139 pci_unmap_page(bnad->pcidev, 139 dma_unmap_page(&bnad->pcidev->dev,
140 pci_unmap_addr(&unmap_array[unmap_cons], 140 dma_unmap_addr(&unmap_array[unmap_cons],
141 dma_addr), 141 dma_addr),
142 skb_shinfo(skb)->frags[i].size, 142 skb_shinfo(skb)->frags[i].size,
143 PCI_DMA_TODEVICE); 143 DMA_TO_DEVICE);
144 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 144 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
145 0); 145 0);
146 if (++unmap_cons >= unmap_q->q_depth) 146 if (++unmap_cons >= unmap_q->q_depth)
147 break; 147 break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
199 sent_bytes += skb->len; 199 sent_bytes += skb->len;
200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags); 200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
201 201
202 pci_unmap_single(bnad->pcidev, 202 dma_unmap_single(&bnad->pcidev->dev,
203 pci_unmap_addr(&unmap_array[unmap_cons], 203 dma_unmap_addr(&unmap_array[unmap_cons],
204 dma_addr), skb_headlen(skb), 204 dma_addr), skb_headlen(skb),
205 PCI_DMA_TODEVICE); 205 DMA_TO_DEVICE);
206 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 206 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); 207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
208 208
209 prefetch(&unmap_array[unmap_cons + 1]); 209 prefetch(&unmap_array[unmap_cons + 1]);
210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
211 prefetch(&unmap_array[unmap_cons + 1]); 211 prefetch(&unmap_array[unmap_cons + 1]);
212 212
213 pci_unmap_page(bnad->pcidev, 213 dma_unmap_page(&bnad->pcidev->dev,
214 pci_unmap_addr(&unmap_array[unmap_cons], 214 dma_unmap_addr(&unmap_array[unmap_cons],
215 dma_addr), 215 dma_addr),
216 skb_shinfo(skb)->frags[i].size, 216 skb_shinfo(skb)->frags[i].size,
217 PCI_DMA_TODEVICE); 217 DMA_TO_DEVICE);
218 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 218 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
219 0); 219 0);
220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); 220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
221 } 221 }
@@ -340,19 +340,22 @@ static void
340bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 340bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
341{ 341{
342 struct bnad_unmap_q *unmap_q; 342 struct bnad_unmap_q *unmap_q;
343 struct bnad_skb_unmap *unmap_array;
343 struct sk_buff *skb; 344 struct sk_buff *skb;
344 int unmap_cons; 345 int unmap_cons;
345 346
346 unmap_q = rcb->unmap_q; 347 unmap_q = rcb->unmap_q;
348 unmap_array = unmap_q->unmap_array;
347 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { 349 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
348 skb = unmap_q->unmap_array[unmap_cons].skb; 350 skb = unmap_array[unmap_cons].skb;
349 if (!skb) 351 if (!skb)
350 continue; 352 continue;
351 unmap_q->unmap_array[unmap_cons].skb = NULL; 353 unmap_array[unmap_cons].skb = NULL;
352 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> 354 dma_unmap_single(&bnad->pcidev->dev,
353 unmap_array[unmap_cons], 355 dma_unmap_addr(&unmap_array[unmap_cons],
354 dma_addr), rcb->rxq->buffer_size, 356 dma_addr),
355 PCI_DMA_FROMDEVICE); 357 rcb->rxq->buffer_size,
358 DMA_FROM_DEVICE);
356 dev_kfree_skb(skb); 359 dev_kfree_skb(skb);
357 } 360 }
358 bnad_reset_rcb(bnad, rcb); 361 bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
391 skb->dev = bnad->netdev; 394 skb->dev = bnad->netdev;
392 skb_reserve(skb, NET_IP_ALIGN); 395 skb_reserve(skb, NET_IP_ALIGN);
393 unmap_array[unmap_prod].skb = skb; 396 unmap_array[unmap_prod].skb = skb;
394 dma_addr = pci_map_single(bnad->pcidev, skb->data, 397 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
395 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE); 398 rcb->rxq->buffer_size,
396 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, 399 DMA_FROM_DEVICE);
400 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
397 dma_addr); 401 dma_addr);
398 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); 402 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
399 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 403 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
434 struct bna_rcb *rcb = NULL; 438 struct bna_rcb *rcb = NULL;
435 unsigned int wi_range, packets = 0, wis = 0; 439 unsigned int wi_range, packets = 0, wis = 0;
436 struct bnad_unmap_q *unmap_q; 440 struct bnad_unmap_q *unmap_q;
441 struct bnad_skb_unmap *unmap_array;
437 struct sk_buff *skb; 442 struct sk_buff *skb;
438 u32 flags; 443 u32 flags, unmap_cons;
439 u32 qid0 = ccb->rcb[0]->rxq->rxq_id; 444 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
440 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 445 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
441 446
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
456 rcb = ccb->rcb[1]; 461 rcb = ccb->rcb[1];
457 462
458 unmap_q = rcb->unmap_q; 463 unmap_q = rcb->unmap_q;
464 unmap_array = unmap_q->unmap_array;
465 unmap_cons = unmap_q->consumer_index;
459 466
460 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb; 467 skb = unmap_array[unmap_cons].skb;
461 BUG_ON(!(skb)); 468 BUG_ON(!(skb));
462 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL; 469 unmap_array[unmap_cons].skb = NULL;
463 pci_unmap_single(bnad->pcidev, 470 dma_unmap_single(&bnad->pcidev->dev,
464 pci_unmap_addr(&unmap_q-> 471 dma_unmap_addr(&unmap_array[unmap_cons],
465 unmap_array[unmap_q->
466 consumer_index],
467 dma_addr), 472 dma_addr),
468 rcb->rxq->buffer_size, 473 rcb->rxq->buffer_size,
469 PCI_DMA_FROMDEVICE); 474 DMA_FROM_DEVICE);
470 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); 475 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
471 476
472 /* Should be more efficient ? Performance ? */ 477 /* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
1015 if (mem_info->mem_type == BNA_MEM_T_DMA) { 1020 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1016 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), 1021 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1017 dma_pa); 1022 dma_pa);
1018 pci_free_consistent(bnad->pcidev, 1023 dma_free_coherent(&bnad->pcidev->dev,
1019 mem_info->mdl[i].len, 1024 mem_info->mdl[i].len,
1020 mem_info->mdl[i].kva, dma_pa); 1025 mem_info->mdl[i].kva, dma_pa);
1021 } else 1026 } else
1022 kfree(mem_info->mdl[i].kva); 1027 kfree(mem_info->mdl[i].kva);
1023 } 1028 }
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
1047 for (i = 0; i < mem_info->num; i++) { 1052 for (i = 0; i < mem_info->num; i++) {
1048 mem_info->mdl[i].len = mem_info->len; 1053 mem_info->mdl[i].len = mem_info->len;
1049 mem_info->mdl[i].kva = 1054 mem_info->mdl[i].kva =
1050 pci_alloc_consistent(bnad->pcidev, 1055 dma_alloc_coherent(&bnad->pcidev->dev,
1051 mem_info->len, &dma_pa); 1056 mem_info->len, &dma_pa,
1057 GFP_KERNEL);
1052 1058
1053 if (mem_info->mdl[i].kva == NULL) 1059 if (mem_info->mdl[i].kva == NULL)
1054 goto err_return; 1060 goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2600 unmap_q->unmap_array[unmap_prod].skb = skb; 2606 unmap_q->unmap_array[unmap_prod].skb = skb;
2601 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR)); 2607 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2602 txqent->vector[vect_id].length = htons(skb_headlen(skb)); 2608 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2603 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb), 2609 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2604 PCI_DMA_TODEVICE); 2610 skb_headlen(skb), DMA_TO_DEVICE);
2605 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2611 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2606 dma_addr); 2612 dma_addr);
2607 2613
2608 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2614 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2630 2636
2631 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); 2637 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2632 txqent->vector[vect_id].length = htons(size); 2638 txqent->vector[vect_id].length = htons(size);
2633 dma_addr = 2639 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2634 pci_map_page(bnad->pcidev, frag->page, 2640 frag->page_offset, size, DMA_TO_DEVICE);
2635 frag->page_offset, size, 2641 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2636 PCI_DMA_TODEVICE);
2637 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2638 dma_addr); 2642 dma_addr);
2639 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2643 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2640 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 2644 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
3022 err = pci_request_regions(pdev, BNAD_NAME); 3026 err = pci_request_regions(pdev, BNAD_NAME);
3023 if (err) 3027 if (err)
3024 goto disable_device; 3028 goto disable_device;
3025 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 3029 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3026 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3030 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3027 *using_dac = 1; 3031 *using_dac = 1;
3028 } else { 3032 } else {
3029 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3033 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3030 if (err) { 3034 if (err) {
3031 err = pci_set_consistent_dma_mask(pdev, 3035 err = dma_set_coherent_mask(&pdev->dev,
3032 DMA_BIT_MASK(32)); 3036 DMA_BIT_MASK(32));
3033 if (err) 3037 if (err)
3034 goto release_regions; 3038 goto release_regions;
3035 } 3039 }
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d51557def..a89117fa4970 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@ struct bnad_rx_info {
181/* Unmap queues for Tx / Rx cleanup */ 181/* Unmap queues for Tx / Rx cleanup */
182struct bnad_skb_unmap { 182struct bnad_skb_unmap {
183 struct sk_buff *skb; 183 struct sk_buff *skb;
184 DECLARE_PCI_UNMAP_ADDR(dma_addr) 184 DEFINE_DMA_UNMAP_ADDR(dma_addr);
185}; 185};
186 186
187struct bnad_unmap_q { 187struct bnad_unmap_q {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0ba59d5aeb7f..d1865cc97313 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
1/* bnx2.c: Broadcom NX2 network driver. 1/* bnx2.c: Broadcom NX2 network driver.
2 * 2 *
3 * Copyright (c) 2004-2010 Broadcom Corporation 3 * Copyright (c) 2004-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -56,11 +56,11 @@
56#include "bnx2_fw.h" 56#include "bnx2_fw.h"
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define DRV_MODULE_VERSION "2.0.21" 59#define DRV_MODULE_VERSION "2.1.6"
60#define DRV_MODULE_RELDATE "Dec 23, 2010" 60#define DRV_MODULE_RELDATE "Mar 7, 2011"
61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw" 61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" 62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1.fw" 63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
64#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw" 64#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw" 65#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
66 66
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
435 struct cnic_ctl_info info; 435 struct cnic_ctl_info info;
436 436
437 mutex_lock(&bp->cnic_lock); 437 mutex_lock(&bp->cnic_lock);
438 c_ops = bp->cnic_ops; 438 c_ops = rcu_dereference_protected(bp->cnic_ops,
439 lockdep_is_held(&bp->cnic_lock));
439 if (c_ops) { 440 if (c_ops) {
440 info.cmd = CNIC_CTL_STOP_CMD; 441 info.cmd = CNIC_CTL_STOP_CMD;
441 c_ops->cnic_ctl(bp->cnic_data, &info); 442 c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
450 struct cnic_ctl_info info; 451 struct cnic_ctl_info info;
451 452
452 mutex_lock(&bp->cnic_lock); 453 mutex_lock(&bp->cnic_lock);
453 c_ops = bp->cnic_ops; 454 c_ops = rcu_dereference_protected(bp->cnic_ops,
455 lockdep_is_held(&bp->cnic_lock));
454 if (c_ops) { 456 if (c_ops) {
455 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { 457 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
456 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; 458 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -8315,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8315#endif 8317#endif
8316}; 8318};
8317 8319
8318static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 8320static void inline vlan_features_add(struct net_device *dev, u32 flags)
8319{ 8321{
8320 dev->vlan_features |= flags; 8322 dev->vlan_features |= flags;
8321} 8323}
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index f459fb2f9add..68020451dc4f 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -1,6 +1,6 @@
1/* bnx2.h: Broadcom NX2 network driver. 1/* bnx2.h: Broadcom NX2 network driver.
2 * 2 *
3 * Copyright (c) 2004-2009 Broadcom Corporation 3 * Copyright (c) 2004-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
6207 6207
6208#define BNX2_CP_SCRATCH 0x001a0000 6208#define BNX2_CP_SCRATCH 0x001a0000
6209 6209
6210#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080
6211
6210 6212
6211/* 6213/*
6212 * mcp_reg definition 6214 * mcp_reg definition
@@ -6759,7 +6761,7 @@ struct bnx2 {
6759 u32 tx_wake_thresh; 6761 u32 tx_wake_thresh;
6760 6762
6761#ifdef BCM_CNIC 6763#ifdef BCM_CNIC
6762 struct cnic_ops *cnic_ops; 6764 struct cnic_ops __rcu *cnic_ops;
6763 void *cnic_data; 6765 void *cnic_data;
6764#endif 6766#endif
6765 6767
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 8849699c66c4..b7ff87b35fbb 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
22 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
23/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
24 24
25#define DRV_MODULE_VERSION "1.62.00-6" 25#define DRV_MODULE_VERSION "1.62.11-0"
26#define DRV_MODULE_RELDATE "2011/01/30" 26#define DRV_MODULE_RELDATE "2011/01/31"
27#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
28 28
29#define BNX2X_MULTI_QUEUE 29#define BNX2X_MULTI_QUEUE
@@ -31,7 +31,7 @@
31#define BNX2X_NEW_NAPI 31#define BNX2X_NEW_NAPI
32 32
33#if defined(CONFIG_DCB) 33#if defined(CONFIG_DCB)
34#define BCM_DCB 34#define BCM_DCBNL
35#endif 35#endif
36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
37#define BCM_CNIC 1 37#define BCM_CNIC 1
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
129#endif 129#endif
130 130
131#define bnx2x_mc_addr(ha) ((ha)->addr) 131#define bnx2x_mc_addr(ha) ((ha)->addr)
132#define bnx2x_uc_addr(ha) ((ha)->addr)
132 133
133#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) 134#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
134#define U64_HI(x) (u32)(((u64)(x)) >> 32) 135#define U64_HI(x) (u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@ struct bnx2x_fastpath {
341 /* chip independed shortcut into rx_prods_offset memory */ 342 /* chip independed shortcut into rx_prods_offset memory */
342 u32 ustorm_rx_prods_offset; 343 u32 ustorm_rx_prods_offset;
343 344
345 u32 rx_buf_size;
346
344 dma_addr_t status_blk_mapping; 347 dma_addr_t status_blk_mapping;
345 348
346 struct sw_tx_bd *tx_buf_ring; 349 struct sw_tx_bd *tx_buf_ring;
@@ -428,6 +431,10 @@ struct bnx2x_fastpath {
428}; 431};
429 432
430#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 433#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
434
435/* Use 2500 as a mini-jumbo MTU for FCoE */
436#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
437
431#ifdef BCM_CNIC 438#ifdef BCM_CNIC
432/* FCoE L2 `fastpath' is right after the eth entries */ 439/* FCoE L2 `fastpath' is right after the eth entries */
433#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) 440#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@ struct bnx2x_slowpath {
810 struct eth_stats_query fw_stats; 817 struct eth_stats_query fw_stats;
811 struct mac_configuration_cmd mac_config; 818 struct mac_configuration_cmd mac_config;
812 struct mac_configuration_cmd mcast_config; 819 struct mac_configuration_cmd mcast_config;
820 struct mac_configuration_cmd uc_mac_config;
813 struct client_init_ramrod_data client_init_data; 821 struct client_init_ramrod_data client_init_data;
814 822
815 /* used by dmae command executer */ 823 /* used by dmae command executer */
@@ -911,7 +919,6 @@ struct bnx2x {
911 int tx_ring_size; 919 int tx_ring_size;
912 920
913 u32 rx_csum; 921 u32 rx_csum;
914 u32 rx_buf_size;
915/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ 922/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
916#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) 923#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
917#define ETH_MIN_PACKET_SIZE 60 924#define ETH_MIN_PACKET_SIZE 60
@@ -939,7 +946,7 @@ struct bnx2x {
939 struct eth_spe *spq_prod_bd; 946 struct eth_spe *spq_prod_bd;
940 struct eth_spe *spq_last_bd; 947 struct eth_spe *spq_last_bd;
941 __le16 *dsb_sp_prod; 948 __le16 *dsb_sp_prod;
942 atomic_t spq_left; /* serialize spq */ 949 atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
943 /* used to synchronize spq accesses */ 950 /* used to synchronize spq accesses */
944 spinlock_t spq_lock; 951 spinlock_t spq_lock;
945 952
@@ -949,6 +956,7 @@ struct bnx2x {
949 u16 eq_prod; 956 u16 eq_prod;
950 u16 eq_cons; 957 u16 eq_cons;
951 __le16 *eq_cons_sb; 958 __le16 *eq_cons_sb;
959 atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
952 960
953 /* Flags for marking that there is a STAT_QUERY or 961 /* Flags for marking that there is a STAT_QUERY or
954 SET_MAC ramrod pending */ 962 SET_MAC ramrod pending */
@@ -976,8 +984,12 @@ struct bnx2x {
976#define MF_FUNC_DIS 0x1000 984#define MF_FUNC_DIS 0x1000
977#define FCOE_MACS_SET 0x2000 985#define FCOE_MACS_SET 0x2000
978#define NO_FCOE_FLAG 0x4000 986#define NO_FCOE_FLAG 0x4000
987#define NO_ISCSI_OOO_FLAG 0x8000
988#define NO_ISCSI_FLAG 0x10000
979 989
980#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) 990#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
991#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
992#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
981 993
982 int pf_num; /* absolute PF number */ 994 int pf_num; /* absolute PF number */
983 int pfid; /* per-path PF number */ 995 int pfid; /* per-path PF number */
@@ -1064,6 +1076,7 @@ struct bnx2x {
1064 int num_queues; 1076 int num_queues;
1065 int disable_tpa; 1077 int disable_tpa;
1066 int int_mode; 1078 int int_mode;
1079 u32 *rx_indir_table;
1067 1080
1068 struct tstorm_eth_mac_filter_config mac_filters; 1081 struct tstorm_eth_mac_filter_config mac_filters;
1069#define BNX2X_ACCEPT_NONE 0x0000 1082#define BNX2X_ACCEPT_NONE 0x0000
@@ -1110,7 +1123,7 @@ struct bnx2x {
1110#define BNX2X_CNIC_FLAG_MAC_SET 1 1123#define BNX2X_CNIC_FLAG_MAC_SET 1
1111 void *t2; 1124 void *t2;
1112 dma_addr_t t2_mapping; 1125 dma_addr_t t2_mapping;
1113 struct cnic_ops *cnic_ops; 1126 struct cnic_ops __rcu *cnic_ops;
1114 void *cnic_data; 1127 void *cnic_data;
1115 u32 cnic_tag; 1128 u32 cnic_tag;
1116 struct cnic_eth_dev cnic_eth_dev; 1129 struct cnic_eth_dev cnic_eth_dev;
@@ -1125,13 +1138,12 @@ struct bnx2x {
1125 u16 cnic_kwq_pending; 1138 u16 cnic_kwq_pending;
1126 u16 cnic_spq_pending; 1139 u16 cnic_spq_pending;
1127 struct mutex cnic_mutex; 1140 struct mutex cnic_mutex;
1128 u8 iscsi_mac[ETH_ALEN];
1129 u8 fip_mac[ETH_ALEN]; 1141 u8 fip_mac[ETH_ALEN];
1130#endif 1142#endif
1131 1143
1132 int dmae_ready; 1144 int dmae_ready;
1133 /* used to synchronize dmae accesses */ 1145 /* used to synchronize dmae accesses */
1134 struct mutex dmae_mutex; 1146 spinlock_t dmae_lock;
1135 1147
1136 /* used to protect the FW mail box */ 1148 /* used to protect the FW mail box */
1137 struct mutex fw_mb_mutex; 1149 struct mutex fw_mb_mutex;
@@ -1448,6 +1460,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
1448void bnx2x_calc_fc_adv(struct bnx2x *bp); 1460void bnx2x_calc_fc_adv(struct bnx2x *bp);
1449int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 1461int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1450 u32 data_hi, u32 data_lo, int common); 1462 u32 data_hi, u32 data_lo, int common);
1463
1464/* Clears multicast and unicast list configuration in the chip. */
1465void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
1466void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
1467void bnx2x_invalidate_uc_list(struct bnx2x *bp);
1468
1451void bnx2x_update_coalesce(struct bnx2x *bp); 1469void bnx2x_update_coalesce(struct bnx2x *bp);
1452int bnx2x_get_link_cfg_idx(struct bnx2x *bp); 1470int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
1453 1471
@@ -1787,5 +1805,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1787BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */ 1805BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
1788 1806
1789extern void bnx2x_set_ethtool_ops(struct net_device *netdev); 1807extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1808void bnx2x_push_indir_table(struct bnx2x *bp);
1790 1809
1791#endif /* bnx2x.h */ 1810#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index a71b32940533..e83ac6dd6fc0 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
232 /* move empty skb from pool to prod and map it */ 232 /* move empty skb from pool to prod and map it */
233 prod_rx_buf->skb = fp->tpa_pool[queue].skb; 233 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
234 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, 234 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
235 bp->rx_buf_size, DMA_FROM_DEVICE); 235 fp->rx_buf_size, DMA_FROM_DEVICE);
236 dma_unmap_addr_set(prod_rx_buf, mapping, mapping); 236 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237 237
238 /* move partial skb from cons to pool (don't unmap yet) */ 238 /* move partial skb from cons to pool (don't unmap yet) */
@@ -367,13 +367,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
367 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; 367 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
368 struct sk_buff *skb = rx_buf->skb; 368 struct sk_buff *skb = rx_buf->skb;
369 /* alloc new skb */ 369 /* alloc new skb */
370 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 370 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
371 371
372 /* Unmap skb in the pool anyway, as we are going to change 372 /* Unmap skb in the pool anyway, as we are going to change
373 pool entry status to BNX2X_TPA_STOP even if new skb allocation 373 pool entry status to BNX2X_TPA_STOP even if new skb allocation
374 fails. */ 374 fails. */
375 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), 375 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
376 bp->rx_buf_size, DMA_FROM_DEVICE); 376 fp->rx_buf_size, DMA_FROM_DEVICE);
377 377
378 if (likely(new_skb)) { 378 if (likely(new_skb)) {
379 /* fix ip xsum and give it to the stack */ 379 /* fix ip xsum and give it to the stack */
@@ -385,10 +385,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
385 prefetch(((char *)(skb)) + L1_CACHE_BYTES); 385 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
386 386
387#ifdef BNX2X_STOP_ON_ERROR 387#ifdef BNX2X_STOP_ON_ERROR
388 if (pad + len > bp->rx_buf_size) { 388 if (pad + len > fp->rx_buf_size) {
389 BNX2X_ERR("skb_put is about to fail... " 389 BNX2X_ERR("skb_put is about to fail... "
390 "pad %d len %d rx_buf_size %d\n", 390 "pad %d len %d rx_buf_size %d\n",
391 pad, len, bp->rx_buf_size); 391 pad, len, fp->rx_buf_size);
392 bnx2x_panic(); 392 bnx2x_panic();
393 return; 393 return;
394 } 394 }
@@ -618,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
618 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { 618 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
619 dma_unmap_single(&bp->pdev->dev, 619 dma_unmap_single(&bp->pdev->dev,
620 dma_unmap_addr(rx_buf, mapping), 620 dma_unmap_addr(rx_buf, mapping),
621 bp->rx_buf_size, 621 fp->rx_buf_size,
622 DMA_FROM_DEVICE); 622 DMA_FROM_DEVICE);
623 skb_reserve(skb, pad); 623 skb_reserve(skb, pad);
624 skb_put(skb, len); 624 skb_put(skb, len);
@@ -858,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
858 u16 ring_prod; 858 u16 ring_prod;
859 int i, j; 859 int i, j;
860 860
861 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
862 IP_HEADER_ALIGNMENT_PADDING;
863
864 DP(NETIF_MSG_IFUP,
865 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
866
867 for_each_rx_queue(bp, j) { 861 for_each_rx_queue(bp, j) {
868 struct bnx2x_fastpath *fp = &bp->fp[j]; 862 struct bnx2x_fastpath *fp = &bp->fp[j];
869 863
864 DP(NETIF_MSG_IFUP,
865 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
866
870 if (!fp->disable_tpa) { 867 if (!fp->disable_tpa) {
871 for (i = 0; i < max_agg_queues; i++) { 868 for (i = 0; i < max_agg_queues; i++) {
872 fp->tpa_pool[i].skb = 869 fp->tpa_pool[i].skb =
873 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 870 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
874 if (!fp->tpa_pool[i].skb) { 871 if (!fp->tpa_pool[i].skb) {
875 BNX2X_ERR("Failed to allocate TPA " 872 BNX2X_ERR("Failed to allocate TPA "
876 "skb pool for queue[%d] - " 873 "skb pool for queue[%d] - "
@@ -978,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
978 975
979 dma_unmap_single(&bp->pdev->dev, 976 dma_unmap_single(&bp->pdev->dev,
980 dma_unmap_addr(rx_buf, mapping), 977 dma_unmap_addr(rx_buf, mapping),
981 bp->rx_buf_size, DMA_FROM_DEVICE); 978 fp->rx_buf_size, DMA_FROM_DEVICE);
982 979
983 rx_buf->skb = NULL; 980 rx_buf->skb = NULL;
984 dev_kfree_skb(skb); 981 dev_kfree_skb(skb);
@@ -1303,6 +1300,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1303 return rc; 1300 return rc;
1304} 1301}
1305 1302
1303static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1304{
1305 int i;
1306
1307 for_each_queue(bp, i) {
1308 struct bnx2x_fastpath *fp = &bp->fp[i];
1309
1310 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1311 if (IS_FCOE_IDX(i))
1312 /*
1313 * Although there are no IP frames expected to arrive to
1314 * this ring we still want to add an
1315 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1316 * overrun attack.
1317 */
1318 fp->rx_buf_size =
1319 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1320 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1321 else
1322 fp->rx_buf_size =
1323 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1324 IP_HEADER_ALIGNMENT_PADDING;
1325 }
1326}
1327
1306/* must be called with rtnl_lock */ 1328/* must be called with rtnl_lock */
1307int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1329int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1308{ 1330{
@@ -1326,6 +1348,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1326 /* must be called before memory allocation and HW init */ 1348 /* must be called before memory allocation and HW init */
1327 bnx2x_ilt_set_info(bp); 1349 bnx2x_ilt_set_info(bp);
1328 1350
1351 /* Set the receive queues buffer size */
1352 bnx2x_set_rx_buf_size(bp);
1353
1329 if (bnx2x_alloc_mem(bp)) 1354 if (bnx2x_alloc_mem(bp))
1330 return -ENOMEM; 1355 return -ENOMEM;
1331 1356
@@ -1481,6 +1506,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1481 1506
1482 bnx2x_set_eth_mac(bp, 1); 1507 bnx2x_set_eth_mac(bp, 1);
1483 1508
1509 /* Clear MC configuration */
1510 if (CHIP_IS_E1(bp))
1511 bnx2x_invalidate_e1_mc_list(bp);
1512 else
1513 bnx2x_invalidate_e1h_mc_list(bp);
1514
1515 /* Clear UC lists configuration */
1516 bnx2x_invalidate_uc_list(bp);
1517
1484 if (bp->pending_max) { 1518 if (bp->pending_max) {
1485 bnx2x_update_max_mf_config(bp, bp->pending_max); 1519 bnx2x_update_max_mf_config(bp, bp->pending_max);
1486 bp->pending_max = 0; 1520 bp->pending_max = 0;
@@ -1489,25 +1523,23 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1489 if (bp->port.pmf) 1523 if (bp->port.pmf)
1490 bnx2x_initial_phy_init(bp, load_mode); 1524 bnx2x_initial_phy_init(bp, load_mode);
1491 1525
1526 /* Initialize Rx filtering */
1527 bnx2x_set_rx_mode(bp->dev);
1528
1492 /* Start fast path */ 1529 /* Start fast path */
1493 switch (load_mode) { 1530 switch (load_mode) {
1494 case LOAD_NORMAL: 1531 case LOAD_NORMAL:
1495 /* Tx queue should be only reenabled */ 1532 /* Tx queue should be only reenabled */
1496 netif_tx_wake_all_queues(bp->dev); 1533 netif_tx_wake_all_queues(bp->dev);
1497 /* Initialize the receive filter. */ 1534 /* Initialize the receive filter. */
1498 bnx2x_set_rx_mode(bp->dev);
1499 break; 1535 break;
1500 1536
1501 case LOAD_OPEN: 1537 case LOAD_OPEN:
1502 netif_tx_start_all_queues(bp->dev); 1538 netif_tx_start_all_queues(bp->dev);
1503 smp_mb__after_clear_bit(); 1539 smp_mb__after_clear_bit();
1504 /* Initialize the receive filter. */
1505 bnx2x_set_rx_mode(bp->dev);
1506 break; 1540 break;
1507 1541
1508 case LOAD_DIAG: 1542 case LOAD_DIAG:
1509 /* Initialize the receive filter. */
1510 bnx2x_set_rx_mode(bp->dev);
1511 bp->state = BNX2X_STATE_DIAG; 1543 bp->state = BNX2X_STATE_DIAG;
1512 break; 1544 break;
1513 1545
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 85ea7f26b51f..ef37b98d6146 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -831,11 +831,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
831 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; 831 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
832 dma_addr_t mapping; 832 dma_addr_t mapping;
833 833
834 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 834 skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
835 if (unlikely(skb == NULL)) 835 if (unlikely(skb == NULL))
836 return -ENOMEM; 836 return -ENOMEM;
837 837
838 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, 838 mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
839 DMA_FROM_DEVICE); 839 DMA_FROM_DEVICE);
840 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 840 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
841 dev_kfree_skb(skb); 841 dev_kfree_skb(skb);
@@ -901,7 +901,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
901 if (fp->tpa_state[i] == BNX2X_TPA_START) 901 if (fp->tpa_state[i] == BNX2X_TPA_START)
902 dma_unmap_single(&bp->pdev->dev, 902 dma_unmap_single(&bp->pdev->dev,
903 dma_unmap_addr(rx_buf, mapping), 903 dma_unmap_addr(rx_buf, mapping),
904 bp->rx_buf_size, DMA_FROM_DEVICE); 904 fp->rx_buf_size, DMA_FROM_DEVICE);
905 905
906 dev_kfree_skb(skb); 906 dev_kfree_skb(skb);
907 rx_buf->skb = NULL; 907 rx_buf->skb = NULL;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index fb60021f81fb..9a24d79c71d9 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -19,6 +19,9 @@
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#ifdef BCM_DCBNL
23#include <linux/dcbnl.h>
24#endif
22 25
23#include "bnx2x.h" 26#include "bnx2x.h"
24#include "bnx2x_cmn.h" 27#include "bnx2x_cmn.h"
@@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
508 return 0; 511 return 0;
509} 512}
510 513
514
515#ifdef BCM_DCBNL
516static inline
517u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
518{
519 u8 pri;
520
521 /* Choose the highest priority */
522 for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
523 if (ent->pri_bitmap & (1 << pri))
524 break;
525 return pri;
526}
527
528static inline
529u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
530{
531 return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
532 DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
533 DCB_APP_IDTYPE_ETHTYPE;
534}
535
536static inline
537void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
538{
539 int i;
540 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
541 bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
542 ~DCBX_APP_ENTRY_VALID;
543}
544
545int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
546{
547 int i, err = 0;
548
549 for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
550 struct dcbx_app_priority_entry *ent =
551 &bp->dcbx_local_feat.app.app_pri_tbl[i];
552
553 if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
554 u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
555
556 /* avoid invalid user-priority */
557 if (up) {
558 struct dcb_app app;
559 app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
560 app.protocol = ent->app_id;
561 app.priority = delall ? 0 : up;
562 err = dcb_setapp(bp->dev, &app);
563 }
564 }
565 }
566 return err;
567}
568#endif
569
511void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) 570void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
512{ 571{
513 switch (state) { 572 switch (state) {
514 case BNX2X_DCBX_STATE_NEG_RECEIVED: 573 case BNX2X_DCBX_STATE_NEG_RECEIVED:
515 { 574 {
516 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); 575 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
517 576#ifdef BCM_DCBNL
577 /**
578 * Delete app tlvs from dcbnl before reading new
579 * negotiation results
580 */
581 bnx2x_dcbnl_update_applist(bp, true);
582#endif
518 /* Read neg results if dcbx is in the FW */ 583 /* Read neg results if dcbx is in the FW */
519 if (bnx2x_dcbx_read_shmem_neg_results(bp)) 584 if (bnx2x_dcbx_read_shmem_neg_results(bp))
520 return; 585 return;
@@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
526 bp->dcbx_error); 591 bp->dcbx_error);
527 592
528 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { 593 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
594#ifdef BCM_DCBNL
595 /**
596 * Add new app tlvs to dcbnl
597 */
598 bnx2x_dcbnl_update_applist(bp, false);
599#endif
529 bnx2x_dcbx_stop_hw_tx(bp); 600 bnx2x_dcbx_stop_hw_tx(bp);
530 return; 601 return;
531 } 602 }
532 /* fall through */ 603 /* fall through */
604#ifdef BCM_DCBNL
605 /**
606 * Invalidate the local app tlvs if they are not added
607 * to the dcbnl app list to avoid deleting them from
608 * the list later on
609 */
610 bnx2x_dcbx_invalidate_local_apps(bp);
611#endif
533 } 612 }
534 case BNX2X_DCBX_STATE_TX_PAUSED: 613 case BNX2X_DCBX_STATE_TX_PAUSED:
535 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); 614 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
@@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
1505 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); 1584 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
1506} 1585}
1507/* DCB netlink */ 1586/* DCB netlink */
1508#ifdef BCM_DCB 1587#ifdef BCM_DCBNL
1509#include <linux/dcbnl.h>
1510 1588
1511#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \ 1589#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
1512 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC) 1590 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
@@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
1816 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); 1894 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
1817} 1895}
1818 1896
1819static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
1820 u8 idtype, u16 idval)
1821{
1822 if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
1823 return false;
1824
1825 switch (idtype) {
1826 case DCB_APP_IDTYPE_ETHTYPE:
1827 if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
1828 DCBX_APP_SF_ETH_TYPE)
1829 return false;
1830 break;
1831 case DCB_APP_IDTYPE_PORTNUM:
1832 if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
1833 DCBX_APP_SF_PORT)
1834 return false;
1835 break;
1836 default:
1837 return false;
1838 }
1839 if (app_ent->app_id != idval)
1840 return false;
1841
1842 return true;
1843}
1844
1845static void bnx2x_admin_app_set_ent( 1897static void bnx2x_admin_app_set_ent(
1846 struct bnx2x_admin_priority_app_table *app_ent, 1898 struct bnx2x_admin_priority_app_table *app_ent,
1847 u8 idtype, u16 idval, u8 up) 1899 u8 idtype, u16 idval, u8 up)
@@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
1943 return bnx2x_set_admin_app_up(bp, idtype, idval, up); 1995 return bnx2x_set_admin_app_up(bp, idtype, idval, up);
1944} 1996}
1945 1997
1946static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
1947 u16 idval)
1948{
1949 int i;
1950 u8 up = 0;
1951
1952 struct bnx2x *bp = netdev_priv(netdev);
1953 DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
1954
1955 /* iterate over the app entries looking for idtype and idval */
1956 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
1957 if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
1958 idtype, idval))
1959 break;
1960
1961 if (i < DCBX_MAX_APP_PROTOCOL)
1962 /* if found return up */
1963 up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
1964 else
1965 DP(NETIF_MSG_LINK, "app not found\n");
1966
1967 return up;
1968}
1969
1970static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) 1998static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
1971{ 1999{
1972 struct bnx2x *bp = netdev_priv(netdev); 2000 struct bnx2x *bp = netdev_priv(netdev);
@@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
2107 .setnumtcs = bnx2x_dcbnl_set_numtcs, 2135 .setnumtcs = bnx2x_dcbnl_set_numtcs,
2108 .getpfcstate = bnx2x_dcbnl_get_pfc_state, 2136 .getpfcstate = bnx2x_dcbnl_get_pfc_state,
2109 .setpfcstate = bnx2x_dcbnl_set_pfc_state, 2137 .setpfcstate = bnx2x_dcbnl_set_pfc_state,
2110 .getapp = bnx2x_dcbnl_get_app_up,
2111 .setapp = bnx2x_dcbnl_set_app_up, 2138 .setapp = bnx2x_dcbnl_set_app_up,
2112 .getdcbx = bnx2x_dcbnl_get_dcbx, 2139 .getdcbx = bnx2x_dcbnl_get_dcbx,
2113 .setdcbx = bnx2x_dcbnl_set_dcbx, 2140 .setdcbx = bnx2x_dcbnl_set_dcbx,
@@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
2115 .setfeatcfg = bnx2x_dcbnl_set_featcfg, 2142 .setfeatcfg = bnx2x_dcbnl_set_featcfg,
2116}; 2143};
2117 2144
2118#endif /* BCM_DCB */ 2145#endif /* BCM_DCBNL */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index f650f98e4092..71b8eda43bd0 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -189,8 +189,9 @@ enum {
189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); 189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
190 190
191/* DCB netlink */ 191/* DCB netlink */
192#ifdef BCM_DCB 192#ifdef BCM_DCBNL
193extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; 193extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
194#endif /* BCM_DCB */ 194int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
195#endif /* BCM_DCBNL */
195 196
196#endif /* BNX2X_DCB_H */ 197#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 7e92f9d0dcfd..f5050155c6b5 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1617,7 +1617,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1617 /* prepare the loopback packet */ 1617 /* prepare the loopback packet */
1618 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? 1618 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
1619 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); 1619 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
1620 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 1620 skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
1621 if (!skb) { 1621 if (!skb) {
1622 rc = -ENOMEM; 1622 rc = -ENOMEM;
1623 goto test_loopback_exit; 1623 goto test_loopback_exit;
@@ -2131,6 +2131,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
2131 return 0; 2131 return 0;
2132} 2132}
2133 2133
2134static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2135 void *rules __always_unused)
2136{
2137 struct bnx2x *bp = netdev_priv(dev);
2138
2139 switch (info->cmd) {
2140 case ETHTOOL_GRXRINGS:
2141 info->data = BNX2X_NUM_ETH_QUEUES(bp);
2142 return 0;
2143
2144 default:
2145 return -EOPNOTSUPP;
2146 }
2147}
2148
2149static int bnx2x_get_rxfh_indir(struct net_device *dev,
2150 struct ethtool_rxfh_indir *indir)
2151{
2152 struct bnx2x *bp = netdev_priv(dev);
2153 size_t copy_size =
2154 min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
2155
2156 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
2157 return -EOPNOTSUPP;
2158
2159 indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
2160 memcpy(indir->ring_index, bp->rx_indir_table,
2161 copy_size * sizeof(bp->rx_indir_table[0]));
2162 return 0;
2163}
2164
2165static int bnx2x_set_rxfh_indir(struct net_device *dev,
2166 const struct ethtool_rxfh_indir *indir)
2167{
2168 struct bnx2x *bp = netdev_priv(dev);
2169 size_t i;
2170
2171 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
2172 return -EOPNOTSUPP;
2173
2174 /* Validate size and indices */
2175 if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
2176 return -EINVAL;
2177 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
2178 if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
2179 return -EINVAL;
2180
2181 memcpy(bp->rx_indir_table, indir->ring_index,
2182 indir->size * sizeof(bp->rx_indir_table[0]));
2183 bnx2x_push_indir_table(bp);
2184 return 0;
2185}
2186
2134static const struct ethtool_ops bnx2x_ethtool_ops = { 2187static const struct ethtool_ops bnx2x_ethtool_ops = {
2135 .get_settings = bnx2x_get_settings, 2188 .get_settings = bnx2x_get_settings,
2136 .set_settings = bnx2x_set_settings, 2189 .set_settings = bnx2x_set_settings,
@@ -2167,6 +2220,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
2167 .get_strings = bnx2x_get_strings, 2220 .get_strings = bnx2x_get_strings,
2168 .phys_id = bnx2x_phys_id, 2221 .phys_id = bnx2x_phys_id,
2169 .get_ethtool_stats = bnx2x_get_ethtool_stats, 2222 .get_ethtool_stats = bnx2x_get_ethtool_stats,
2223 .get_rxnfc = bnx2x_get_rxnfc,
2224 .get_rxfh_indir = bnx2x_get_rxfh_indir,
2225 .set_rxfh_indir = bnx2x_set_rxfh_indir,
2170}; 2226};
2171 2227
2172void bnx2x_set_ethtool_ops(struct net_device *netdev) 2228void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 548f5631c0dc..be503cc0a50b 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,20 +11,27 @@
11 11
12#include "bnx2x_fw_defs.h" 12#include "bnx2x_fw_defs.h"
13 13
14#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
15
14struct license_key { 16struct license_key {
15 u32 reserved[6]; 17 u32 reserved[6];
16 18
17#if defined(__BIG_ENDIAN) 19 u32 max_iscsi_conn;
18 u16 max_iscsi_init_conn; 20#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
19 u16 max_iscsi_trgt_conn; 21#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
20#elif defined(__LITTLE_ENDIAN) 22#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
21 u16 max_iscsi_trgt_conn; 23#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
22 u16 max_iscsi_init_conn;
23#endif
24 24
25 u32 reserved_a[6]; 25 u32 reserved_a;
26}; 26
27 u32 max_fcoe_conn;
28#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
29#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
30#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
31#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
27 32
33 u32 reserved_b[4];
34};
28 35
29#define PORT_0 0 36#define PORT_0 0
30#define PORT_1 1 37#define PORT_1 1
@@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
237#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 244#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
238 245
239 246
240 u32 Reserved0[16]; /* 0x158 */ 247 u32 Reserved0[3]; /* 0x158 */
241 248 /* Controls the TX laser of the SFP+ module */
249 u32 sfp_ctrl; /* 0x164 */
250#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
251#define PORT_HW_CFG_TX_LASER_SHIFT 0
252#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
253#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
254#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
255#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
256#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
257
258 /* Controls the fault module LED of the SFP+ */
259#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
260#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
261#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
262#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
263#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
264#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
265#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
266 u32 Reserved01[12]; /* 0x158 */
242 /* for external PHY, or forced mode or during AN */ 267 /* for external PHY, or forced mode or during AN */
243 u16 xgxs_config_rx[4]; /* 0x198 */ 268 u16 xgxs_config_rx[4]; /* 0x198 */
244 269
@@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
246 271
247 u32 Reserved1[56]; /* 0x1A8 */ 272 u32 Reserved1[56]; /* 0x1A8 */
248 u32 default_cfg; /* 0x288 */ 273 u32 default_cfg; /* 0x288 */
274#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
275#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
276#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
277#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
278#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
279#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
280
281#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
282#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
283#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
284#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
285#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
286#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
287
288#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
289#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
290#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
291#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
292#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
293#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
294
295#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
296#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
297#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
298#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
299#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
300#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
301
302 /*
303 * When KR link is required to be set to force which is not
304 * KR-compliant, this parameter determine what is the trigger for it.
305 * When GPIO is selected, low input will force the speed. Currently
306 * default speed is 1G. In the future, it may be widen to select the
307 * forced speed in with another parameter. Note when force-1G is
308 * enabled, it override option 56: Link Speed option.
309 */
310#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
311#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
312#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
313#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
314#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
315#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
316#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
317#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
318#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
319#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
320#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
321#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
322 /* Enable to determine with which GPIO to reset the external phy */
323#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
324#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
325#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
326#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
327#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
328#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
329#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
330#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
331#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
332#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
333#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
249 /* Enable BAM on KR */ 334 /* Enable BAM on KR */
250#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 335#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
251#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 336#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
252#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 337#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
253#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 338#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
254 339
340 /* Enable Common Mode Sense */
341#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
342#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
343#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
344#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
345
255 u32 speed_capability_mask2; /* 0x28C */ 346 u32 speed_capability_mask2; /* 0x28C */
256#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF 347#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
257#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 348#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -381,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
381#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 472#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
382#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 473#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
383#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 474#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
475#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
384#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 476#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
385#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 477#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
386 478
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index dd1210fddfff..f2f367d4e74d 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
1/* Copyright 2008-2009 Broadcom Corporation 1/* Copyright 2008-2011 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
28 28
29/********************************************************/ 29/********************************************************/
30#define ETH_HLEN 14 30#define ETH_HLEN 14
31#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */ 31/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
32#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
32#define ETH_MIN_PACKET_SIZE 60 33#define ETH_MIN_PACKET_SIZE 60
33#define ETH_MAX_PACKET_SIZE 1500 34#define ETH_MAX_PACKET_SIZE 1500
34#define ETH_MAX_JUMBO_PACKET_SIZE 9600 35#define ETH_MAX_JUMBO_PACKET_SIZE 9600
35#define MDIO_ACCESS_TIMEOUT 1000 36#define MDIO_ACCESS_TIMEOUT 1000
36#define BMAC_CONTROL_RX_ENABLE 2 37#define BMAC_CONTROL_RX_ENABLE 2
37 38
38/***********************************************************/ 39/***********************************************************/
39/* Shortcut definitions */ 40/* Shortcut definitions */
@@ -79,7 +80,7 @@
79 80
80#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 81#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
81#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 82#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
82#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM 83#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
83#define AUTONEG_PARALLEL \ 84#define AUTONEG_PARALLEL \
84 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 85 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
85#define AUTONEG_SGMII_FIBER_AUTODET \ 86#define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
112#define GP_STATUS_10G_KX4 \ 113#define GP_STATUS_10G_KX4 \
113 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 114 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
114 115
115#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD 116#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
116#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD 117#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
117#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD 118#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
118#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 119#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
119#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD 120#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
120#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD 121#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
121#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD 122#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
123#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD 124#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
124#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD 125#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
125#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD 126#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
126#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD 127#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
127#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD 128#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
128#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD 129#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
129#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD 130#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
130#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD 131#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
131#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD 132#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
132#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD 133#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
133#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD 134#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
134#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD 135#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
135#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD 136#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
136#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD 137#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
137#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD 138#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
138 139
139#define PHY_XGXS_FLAG 0x1 140#define PHY_XGXS_FLAG 0x1
140#define PHY_SGMII_FLAG 0x2 141#define PHY_SGMII_FLAG 0x2
@@ -142,7 +143,7 @@
142 143
143/* */ 144/* */
144#define SFP_EEPROM_CON_TYPE_ADDR 0x2 145#define SFP_EEPROM_CON_TYPE_ADDR 0x2
145 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 146 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
146 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 147 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
147 148
148 149
@@ -153,15 +154,15 @@
153 154
154#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 155#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
155 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 157 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
157 158
158#define SFP_EEPROM_OPTIONS_ADDR 0x40 159#define SFP_EEPROM_OPTIONS_ADDR 0x40
159 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 160 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
160#define SFP_EEPROM_OPTIONS_SIZE 2 161#define SFP_EEPROM_OPTIONS_SIZE 2
161 162
162#define EDC_MODE_LINEAR 0x0022 163#define EDC_MODE_LINEAR 0x0022
163#define EDC_MODE_LIMITING 0x0044 164#define EDC_MODE_LIMITING 0x0044
164#define EDC_MODE_PASSIVE_DAC 0x0055 165#define EDC_MODE_PASSIVE_DAC 0x0055
165 166
166 167
167#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) 168#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -170,24 +171,18 @@
170/* INTERFACE */ 171/* INTERFACE */
171/**********************************************************/ 172/**********************************************************/
172 173
173#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ 174#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
174 bnx2x_cl45_write(_bp, _phy, \ 175 bnx2x_cl45_write(_bp, _phy, \
175 (_phy)->def_md_devad, \ 176 (_phy)->def_md_devad, \
176 (_bank + (_addr & 0xf)), \ 177 (_bank + (_addr & 0xf)), \
177 _val) 178 _val)
178 179
179#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ 180#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
180 bnx2x_cl45_read(_bp, _phy, \ 181 bnx2x_cl45_read(_bp, _phy, \
181 (_phy)->def_md_devad, \ 182 (_phy)->def_md_devad, \
182 (_bank + (_addr & 0xf)), \ 183 (_bank + (_addr & 0xf)), \
183 _val) 184 _val)
184 185
185static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
186 u8 devad, u16 reg, u16 *ret_val);
187
188static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
189 u8 devad, u16 reg, u16 val);
190
191static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) 186static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
192{ 187{
193 u32 val = REG_RD(bp, reg); 188 u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)
216 211
217 DP(NETIF_MSG_LINK, "ETS disabled configuration\n"); 212 DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
218 213
219 /** 214 /*
220 * mapping between entry priority to client number (0,1,2 -debug and 215 * mapping between entry priority to client number (0,1,2 -debug and
221 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 216 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
222 * 3bits client num. 217 * 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
225 */ 220 */
226 221
227 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); 222 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
228 /** 223 /*
229 * Bitmap of 5bits length. Each bit specifies whether the entry behaves 224 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
230 * as strict. Bits 0,1,2 - debug and management entries, 3 - 225 * as strict. Bits 0,1,2 - debug and management entries, 3 -
231 * COS0 entry, 4 - COS1 entry. 226 * COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
237 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 232 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
238 /* defines which entries (clients) are subjected to WFQ arbitration */ 233 /* defines which entries (clients) are subjected to WFQ arbitration */
239 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 234 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
240 /** 235 /*
241 * For strict priority entries defines the number of consecutive 236 * For strict priority entries defines the number of consecutive
242 * slots for the highest priority. 237 * slots for the highest priority.
243 */ 238 */
244 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 239 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
245 /** 240 /*
246 * mapping between the CREDIT_WEIGHT registers and actual client 241 * mapping between the CREDIT_WEIGHT registers and actual client
247 * numbers 242 * numbers
248 */ 243 */
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
255 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); 250 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
256 /* ETS mode disable */ 251 /* ETS mode disable */
257 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); 252 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
258 /** 253 /*
259 * If ETS mode is enabled (there is no strict priority) defines a WFQ 254 * If ETS mode is enabled (there is no strict priority) defines a WFQ
260 * weight for COS0/COS1. 255 * weight for COS0/COS1.
261 */ 256 */
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
268 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 263 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
269} 264}
270 265
271void bnx2x_ets_bw_limit_common(const struct link_params *params) 266static void bnx2x_ets_bw_limit_common(const struct link_params *params)
272{ 267{
273 /* ETS disabled configuration */ 268 /* ETS disabled configuration */
274 struct bnx2x *bp = params->bp; 269 struct bnx2x *bp = params->bp;
275 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); 270 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
276 /** 271 /*
277 * defines which entries (clients) are subjected to WFQ arbitration 272 * defines which entries (clients) are subjected to WFQ arbitration
278 * COS0 0x8 273 * COS0 0x8
279 * COS1 0x10 274 * COS1 0x10
280 */ 275 */
281 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); 276 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
282 /** 277 /*
283 * mapping between the ARB_CREDIT_WEIGHT registers and actual 278 * mapping between the ARB_CREDIT_WEIGHT registers and actual
284 * client numbers (WEIGHT_0 does not actually have to represent 279 * client numbers (WEIGHT_0 does not actually have to represent
285 * client 0) 280 * client 0)
286 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 281 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
287 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 282 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
288 */ 283 */
289 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); 284 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
290 285
291 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 286 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)
298 293
299 /* Defines the number of consecutive slots for the strict priority */ 294 /* Defines the number of consecutive slots for the strict priority */
300 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 295 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
301 /** 296 /*
302 * Bitmap of 5bits length. Each bit specifies whether the entry behaves 297 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
303 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 298 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
304 * entry, 4 - COS1 entry. 299 * entry, 4 - COS1 entry.
305 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT 300 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
306 * bit4 bit3 bit2 bit1 bit0 301 * bit4 bit3 bit2 bit1 bit0
307 * MCP and debug are strict 302 * MCP and debug are strict
308 */ 303 */
309 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 304 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
310 305
311 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/ 306 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
329 if ((0 == total_bw) || 324 if ((0 == total_bw) ||
330 (0 == cos0_bw) || 325 (0 == cos0_bw) ||
331 (0 == cos1_bw)) { 326 (0 == cos1_bw)) {
332 DP(NETIF_MSG_LINK, 327 DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
333 "bnx2x_ets_bw_limit: Total BW can't be zero\n");
334 return; 328 return;
335 } 329 }
336 330
@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
355 u32 val = 0; 349 u32 val = 0;
356 350
357 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); 351 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
358 /** 352 /*
359 * Bitmap of 5bits length. Each bit specifies whether the entry behaves 353 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
360 * as strict. Bits 0,1,2 - debug and management entries, 354 * as strict. Bits 0,1,2 - debug and management entries,
361 * 3 - COS0 entry, 4 - COS1 entry. 355 * 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
364 * MCP and debug are strict 358 * MCP and debug are strict
365 */ 359 */
366 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); 360 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
367 /** 361 /*
368 * For strict priority entries defines the number of consecutive slots 362 * For strict priority entries defines the number of consecutive slots
369 * for the highest priority. 363 * for the highest priority.
370 */ 364 */
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
377 /* Defines the number of consecutive slots for the strict priority */ 371 /* Defines the number of consecutive slots for the strict priority */
378 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); 372 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
379 373
380 /** 374 /*
381 * mapping between entry priority to client number (0,1,2 -debug and 375 * mapping between entry priority to client number (0,1,2 -debug and
382 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 376 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
383 * 3bits client num. 377 * 3bits client num.
384 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 378 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
385 * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 379 * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
386 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 380 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
387 */ 381 */
388 val = (0 == strict_cos) ? 0x2318 : 0x22E0; 382 val = (0 == strict_cos) ? 0x2318 : 0x22E0;
389 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); 383 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
390 384
@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
471/* MAC/PBF section */ 465/* MAC/PBF section */
472/******************************************************************/ 466/******************************************************************/
473static void bnx2x_emac_init(struct link_params *params, 467static void bnx2x_emac_init(struct link_params *params,
474 struct link_vars *vars) 468 struct link_vars *vars)
475{ 469{
476 /* reset and unreset the emac core */ 470 /* reset and unreset the emac core */
477 struct bnx2x *bp = params->bp; 471 struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
481 u16 timeout; 475 u16 timeout;
482 476
483 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 477 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
484 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 478 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
485 udelay(5); 479 udelay(5);
486 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 480 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
487 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 481 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
488 482
489 /* init emac - use read-modify-write */ 483 /* init emac - use read-modify-write */
490 /* self clear reset */ 484 /* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
515} 509}
516 510
517static u8 bnx2x_emac_enable(struct link_params *params, 511static u8 bnx2x_emac_enable(struct link_params *params,
518 struct link_vars *vars, u8 lb) 512 struct link_vars *vars, u8 lb)
519{ 513{
520 struct bnx2x *bp = params->bp; 514 struct bnx2x *bp = params->bp;
521 u8 port = params->port; 515 u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
527 /* enable emac and not bmac */ 521 /* enable emac and not bmac */
528 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); 522 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
529 523
530 /* for paladium */
531 if (CHIP_REV_IS_EMUL(bp)) {
532 /* Use lane 1 (of lanes 0-3) */
533 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
534 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
535 port*4, 1);
536 }
537 /* for fpga */
538 else
539
540 if (CHIP_REV_IS_FPGA(bp)) {
541 /* Use lane 1 (of lanes 0-3) */
542 DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
543
544 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
545 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
546 0);
547 } else
548 /* ASIC */ 524 /* ASIC */
549 if (vars->phy_flags & PHY_XGXS_FLAG) { 525 if (vars->phy_flags & PHY_XGXS_FLAG) {
550 u32 ser_lane = ((params->lane_config & 526 u32 ser_lane = ((params->lane_config &
551 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 527 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
552 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 528 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
553 529
554 DP(NETIF_MSG_LINK, "XGXS\n"); 530 DP(NETIF_MSG_LINK, "XGXS\n");
555 /* select the master lanes (out of 0-3) */ 531 /* select the master lanes (out of 0-3) */
556 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + 532 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
557 port*4, ser_lane);
558 /* select XGXS */ 533 /* select XGXS */
559 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + 534 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
560 port*4, 1);
561 535
562 } else { /* SerDes */ 536 } else { /* SerDes */
563 DP(NETIF_MSG_LINK, "SerDes\n"); 537 DP(NETIF_MSG_LINK, "SerDes\n");
564 /* select SerDes */ 538 /* select SerDes */
565 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + 539 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
566 port*4, 0);
567 } 540 }
568 541
569 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 542 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
570 EMAC_RX_MODE_RESET); 543 EMAC_RX_MODE_RESET);
571 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 544 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
572 EMAC_TX_MODE_RESET); 545 EMAC_TX_MODE_RESET);
573 546
574 if (CHIP_REV_IS_SLOW(bp)) { 547 if (CHIP_REV_IS_SLOW(bp)) {
575 /* config GMII mode */ 548 /* config GMII mode */
576 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 549 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
577 EMAC_WR(bp, EMAC_REG_EMAC_MODE, 550 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
578 (val | EMAC_MODE_PORT_GMII));
579 } else { /* ASIC */ 551 } else { /* ASIC */
580 /* pause enable/disable */ 552 /* pause enable/disable */
581 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 553 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
605 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 577 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
606 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 578 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
607 579
608 /** 580 /*
609 * Setting this bit causes MAC control frames (except for pause 581 * Setting this bit causes MAC control frames (except for pause
610 * frames) to be passed on for processing. This setting has no 582 * frames) to be passed on for processing. This setting has no
611 * affect on the operation of the pause frames. This bit effects 583 * affect on the operation of the pause frames. This bit effects
612 * all packets regardless of RX Parser packet sorting logic. 584 * all packets regardless of RX Parser packet sorting logic.
613 * Turn the PFC off to make sure we are in Xon state before 585 * Turn the PFC off to make sure we are in Xon state before
614 * enabling it. 586 * enabling it.
615 */ 587 */
616 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); 588 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
617 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { 589 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
618 DP(NETIF_MSG_LINK, "PFC is enabled\n"); 590 DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
666 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); 638 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
667 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); 639 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
668 640
669 if (CHIP_REV_IS_EMUL(bp)) { 641 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
670 /* take the BigMac out of reset */
671 REG_WR(bp,
672 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
673 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
674
675 /* enable access for bmac registers */
676 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
677 } else
678 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
679 642
680 vars->mac_type = MAC_TYPE_EMAC; 643 vars->mac_type = MAC_TYPE_EMAC;
681 return 0; 644 return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
731 val |= (1<<5); 694 val |= (1<<5);
732 wb_data[0] = val; 695 wb_data[0] = val;
733 wb_data[1] = 0; 696 wb_data[1] = 0;
734 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, 697 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
735 wb_data, 2);
736 udelay(30); 698 udelay(30);
737 699
738 /* Tx control */ 700 /* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
768 730
769 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); 731 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
770 732
771 /** 733 /*
772 * Set Time (based unit is 512 bit time) between automatic 734 * Set Time (based unit is 512 bit time) between automatic
773 * re-sending of PP packets amd enable automatic re-send of 735 * re-sending of PP packets amd enable automatic re-send of
774 * Per-Priroity Packet as long as pp_gen is asserted and 736 * Per-Priroity Packet as long as pp_gen is asserted and
775 * pp_disable is low. 737 * pp_disable is low.
776 */ 738 */
777 val = 0x8000; 739 val = 0x8000;
778 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) 740 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
779 val |= (1<<16); /* enable automatic re-send */ 741 val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
781 wb_data[0] = val; 743 wb_data[0] = val;
782 wb_data[1] = 0; 744 wb_data[1] = 0;
783 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, 745 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
784 wb_data, 2); 746 wb_data, 2);
785 747
786 /* mac control */ 748 /* mac control */
787 val = 0x3; /* Enable RX and TX */ 749 val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
795 757
796 wb_data[0] = val; 758 wb_data[0] = val;
797 wb_data[1] = 0; 759 wb_data[1] = 0;
798 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, 760 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
799 wb_data, 2);
800} 761}
801 762
802static void bnx2x_update_pfc_brb(struct link_params *params, 763static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
825 full_xon_th = 786 full_xon_th =
826 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; 787 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
827 } 788 }
828 /* The number of free blocks below which the pause signal to class 0 789 /*
829 of MAC #n is asserted. n=0,1 */ 790 * The number of free blocks below which the pause signal to class 0
791 * of MAC #n is asserted. n=0,1
792 */
830 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th); 793 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
831 /* The number of free blocks above which the pause signal to class 0 794 /*
832 of MAC #n is de-asserted. n=0,1 */ 795 * The number of free blocks above which the pause signal to class 0
796 * of MAC #n is de-asserted. n=0,1
797 */
833 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th); 798 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
834 /* The number of free blocks below which the full signal to class 0 799 /*
835 of MAC #n is asserted. n=0,1 */ 800 * The number of free blocks below which the full signal to class 0
801 * of MAC #n is asserted. n=0,1
802 */
836 REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th); 803 REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
837 /* The number of free blocks above which the full signal to class 0 804 /*
838 of MAC #n is de-asserted. n=0,1 */ 805 * The number of free blocks above which the full signal to class 0
806 * of MAC #n is de-asserted. n=0,1
807 */
839 REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th); 808 REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
840 809
841 if (set_pfc && pfc_params) { 810 if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
859 full_xon_th = 828 full_xon_th =
860 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; 829 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
861 } 830 }
862 /** 831 /*
863 * The number of free blocks below which the pause signal to 832 * The number of free blocks below which the pause signal to
864 * class 1 of MAC #n is asserted. n=0,1 833 * class 1 of MAC #n is asserted. n=0,1
865 **/ 834 */
866 REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th); 835 REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
867 /** 836 /*
868 * The number of free blocks above which the pause signal to 837 * The number of free blocks above which the pause signal to
869 * class 1 of MAC #n is de-asserted. n=0,1 838 * class 1 of MAC #n is de-asserted. n=0,1
870 **/ 839 */
871 REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th); 840 REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
872 /** 841 /*
873 * The number of free blocks below which the full signal to 842 * The number of free blocks below which the full signal to
874 * class 1 of MAC #n is asserted. n=0,1 843 * class 1 of MAC #n is asserted. n=0,1
875 **/ 844 */
876 REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th); 845 REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
877 /** 846 /*
878 * The number of free blocks above which the full signal to 847 * The number of free blocks above which the full signal to
879 * class 1 of MAC #n is de-asserted. n=0,1 848 * class 1 of MAC #n is de-asserted. n=0,1
880 **/ 849 */
881 REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th); 850 REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
882 } 851 }
883} 852}
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
896 FEATURE_CONFIG_PFC_ENABLED; 865 FEATURE_CONFIG_PFC_ENABLED;
897 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); 866 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
898 867
899 /** 868 /*
900 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set 869 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
901 * MAC control frames (that are not pause packets) 870 * MAC control frames (that are not pause packets)
902 * will be forwarded to the XCM. 871 * will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
904 xcm_mask = REG_RD(bp, 873 xcm_mask = REG_RD(bp,
905 port ? NIG_REG_LLH1_XCM_MASK : 874 port ? NIG_REG_LLH1_XCM_MASK :
906 NIG_REG_LLH0_XCM_MASK); 875 NIG_REG_LLH0_XCM_MASK);
907 /** 876 /*
908 * nig params will override non PFC params, since it's possible to 877 * nig params will override non PFC params, since it's possible to
909 * do transition from PFC to SAFC 878 * do transition from PFC to SAFC
910 */ 879 */
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
994 struct link_vars *vars, 963 struct link_vars *vars,
995 struct bnx2x_nig_brb_pfc_port_params *pfc_params) 964 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
996{ 965{
997 /** 966 /*
998 * The PFC and pause are orthogonal to one another, meaning when 967 * The PFC and pause are orthogonal to one another, meaning when
999 * PFC is enabled, the pause are disabled, and when PFC is 968 * PFC is enabled, the pause are disabled, and when PFC is
1000 * disabled, pause are set according to the pause result. 969 * disabled, pause are set according to the pause result.
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,
1035 1004
1036static u8 bnx2x_bmac1_enable(struct link_params *params, 1005static u8 bnx2x_bmac1_enable(struct link_params *params,
1037 struct link_vars *vars, 1006 struct link_vars *vars,
1038 u8 is_lb) 1007 u8 is_lb)
1039{ 1008{
1040 struct bnx2x *bp = params->bp; 1009 struct bnx2x *bp = params->bp;
1041 u8 port = params->port; 1010 u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1049 /* XGXS control */ 1018 /* XGXS control */
1050 wb_data[0] = 0x3c; 1019 wb_data[0] = 0x3c;
1051 wb_data[1] = 0; 1020 wb_data[1] = 0;
1052 REG_WR_DMAE(bp, bmac_addr + 1021 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
1053 BIGMAC_REGISTER_BMAC_XGXS_CONTROL, 1022 wb_data, 2);
1054 wb_data, 2);
1055 1023
1056 /* tx MAC SA */ 1024 /* tx MAC SA */
1057 wb_data[0] = ((params->mac_addr[2] << 24) | 1025 wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1060 params->mac_addr[5]); 1028 params->mac_addr[5]);
1061 wb_data[1] = ((params->mac_addr[0] << 8) | 1029 wb_data[1] = ((params->mac_addr[0] << 8) |
1062 params->mac_addr[1]); 1030 params->mac_addr[1]);
1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, 1031 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
1064 wb_data, 2);
1065 1032
1066 /* mac control */ 1033 /* mac control */
1067 val = 0x3; 1034 val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1071 } 1038 }
1072 wb_data[0] = val; 1039 wb_data[0] = val;
1073 wb_data[1] = 0; 1040 wb_data[1] = 0;
1074 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 1041 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
1075 wb_data, 2);
1076 1042
1077 /* set rx mtu */ 1043 /* set rx mtu */
1078 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1044 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1079 wb_data[1] = 0; 1045 wb_data[1] = 0;
1080 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, 1046 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
1081 wb_data, 2);
1082 1047
1083 bnx2x_update_pfc_bmac1(params, vars); 1048 bnx2x_update_pfc_bmac1(params, vars);
1084 1049
1085 /* set tx mtu */ 1050 /* set tx mtu */
1086 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1051 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1087 wb_data[1] = 0; 1052 wb_data[1] = 0;
1088 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, 1053 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
1089 wb_data, 2);
1090 1054
1091 /* set cnt max size */ 1055 /* set cnt max size */
1092 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1056 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1093 wb_data[1] = 0; 1057 wb_data[1] = 0;
1094 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, 1058 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1095 wb_data, 2);
1096 1059
1097 /* configure safc */ 1060 /* configure safc */
1098 wb_data[0] = 0x1000200; 1061 wb_data[0] = 0x1000200;
1099 wb_data[1] = 0; 1062 wb_data[1] = 0;
1100 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
1101 wb_data, 2); 1064 wb_data, 2);
1102 /* fix for emulation */
1103 if (CHIP_REV_IS_EMUL(bp)) {
1104 wb_data[0] = 0xf000;
1105 wb_data[1] = 0;
1106 REG_WR_DMAE(bp,
1107 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
1108 wb_data, 2);
1109 }
1110
1111 1065
1112 return 0; 1066 return 0;
1113} 1067}
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1126 1080
1127 wb_data[0] = 0; 1081 wb_data[0] = 0;
1128 wb_data[1] = 0; 1082 wb_data[1] = 0;
1129 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, 1083 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
1130 wb_data, 2);
1131 udelay(30); 1084 udelay(30);
1132 1085
1133 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ 1086 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
1134 wb_data[0] = 0x3c; 1087 wb_data[0] = 0x3c;
1135 wb_data[1] = 0; 1088 wb_data[1] = 0;
1136 REG_WR_DMAE(bp, bmac_addr + 1089 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
1137 BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, 1090 wb_data, 2);
1138 wb_data, 2);
1139 1091
1140 udelay(30); 1092 udelay(30);
1141 1093
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1147 wb_data[1] = ((params->mac_addr[0] << 8) | 1099 wb_data[1] = ((params->mac_addr[0] << 8) |
1148 params->mac_addr[1]); 1100 params->mac_addr[1]);
1149 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, 1101 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
1150 wb_data, 2); 1102 wb_data, 2);
1151 1103
1152 udelay(30); 1104 udelay(30);
1153 1105
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1155 wb_data[0] = 0x1000200; 1107 wb_data[0] = 0x1000200;
1156 wb_data[1] = 0; 1108 wb_data[1] = 0;
1157 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, 1109 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
1158 wb_data, 2); 1110 wb_data, 2);
1159 udelay(30); 1111 udelay(30);
1160 1112
1161 /* set rx mtu */ 1113 /* set rx mtu */
1162 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1114 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1163 wb_data[1] = 0; 1115 wb_data[1] = 0;
1164 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, 1116 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
1165 wb_data, 2);
1166 udelay(30); 1117 udelay(30);
1167 1118
1168 /* set tx mtu */ 1119 /* set tx mtu */
1169 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1120 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1170 wb_data[1] = 0; 1121 wb_data[1] = 0;
1171 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, 1122 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
1172 wb_data, 2);
1173 udelay(30); 1123 udelay(30);
1174 /* set cnt max size */ 1124 /* set cnt max size */
1175 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; 1125 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
1176 wb_data[1] = 0; 1126 wb_data[1] = 0;
1177 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, 1127 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1178 wb_data, 2);
1179 udelay(30); 1128 udelay(30);
1180 bnx2x_update_pfc_bmac2(params, vars, is_lb); 1129 bnx2x_update_pfc_bmac2(params, vars, is_lb);
1181 1130
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
1191 u32 val; 1140 u32 val;
1192 /* reset and unreset the BigMac */ 1141 /* reset and unreset the BigMac */
1193 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1142 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1194 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 1143 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1195 msleep(1); 1144 msleep(1);
1196 1145
1197 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1146 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1198 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 1147 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1199 1148
1200 /* enable access for bmac registers */ 1149 /* enable access for bmac registers */
1201 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); 1150 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
1230 struct bnx2x *bp = params->bp; 1179 struct bnx2x *bp = params->bp;
1231 1180
1232 REG_WR(bp, params->shmem_base + 1181 REG_WR(bp, params->shmem_base +
1233 offsetof(struct shmem_region, 1182 offsetof(struct shmem_region,
1234 port_mb[params->port].link_status), 1183 port_mb[params->port].link_status), link_status);
1235 link_status);
1236} 1184}
1237 1185
1238static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) 1186static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1239{ 1187{
1240 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 1188 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
1241 NIG_REG_INGRESS_BMAC0_MEM; 1189 NIG_REG_INGRESS_BMAC0_MEM;
1242 u32 wb_data[2]; 1190 u32 wb_data[2];
1243 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 1191 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
1244 1192
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1250 if (CHIP_IS_E2(bp)) { 1198 if (CHIP_IS_E2(bp)) {
1251 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1199 /* Clear Rx Enable bit in BMAC_CONTROL register */
1252 REG_RD_DMAE(bp, bmac_addr + 1200 REG_RD_DMAE(bp, bmac_addr +
1253 BIGMAC2_REGISTER_BMAC_CONTROL, 1201 BIGMAC2_REGISTER_BMAC_CONTROL,
1254 wb_data, 2); 1202 wb_data, 2);
1255 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 1203 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
1256 REG_WR_DMAE(bp, bmac_addr + 1204 REG_WR_DMAE(bp, bmac_addr +
1257 BIGMAC2_REGISTER_BMAC_CONTROL, 1205 BIGMAC2_REGISTER_BMAC_CONTROL,
1258 wb_data, 2); 1206 wb_data, 2);
1259 } else { 1207 } else {
1260 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1208 /* Clear Rx Enable bit in BMAC_CONTROL register */
1261 REG_RD_DMAE(bp, bmac_addr + 1209 REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1271} 1219}
1272 1220
1273static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, 1221static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1274 u32 line_speed) 1222 u32 line_speed)
1275{ 1223{
1276 struct bnx2x *bp = params->bp; 1224 struct bnx2x *bp = params->bp;
1277 u8 port = params->port; 1225 u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1308 /* update threshold */ 1256 /* update threshold */
1309 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 1257 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
1310 /* update init credit */ 1258 /* update init credit */
1311 init_crd = 778; /* (800-18-4) */ 1259 init_crd = 778; /* (800-18-4) */
1312 1260
1313 } else { 1261 } else {
1314 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 1262 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1353 return 0; 1301 return 0;
1354} 1302}
1355 1303
1304/*
1305 * get_emac_base
1306 *
1307 * @param cb
1308 * @param mdc_mdio_access
1309 * @param port
1310 *
1311 * @return u32
1312 *
1313 * This function selects the MDC/MDIO access (through emac0 or
1314 * emac1) depend on the mdc_mdio_access, port, port swapped. Each
1315 * phy has a default access mode, which could also be overridden
1316 * by nvram configuration. This parameter, whether this is the
1317 * default phy configuration, or the nvram overrun
1318 * configuration, is passed here as mdc_mdio_access and selects
1319 * the emac_base for the CL45 read/writes operations
1320 */
1356static u32 bnx2x_get_emac_base(struct bnx2x *bp, 1321static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1357 u32 mdc_mdio_access, u8 port) 1322 u32 mdc_mdio_access, u8 port)
1358{ 1323{
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1385 1350
1386} 1351}
1387 1352
1388u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, 1353/******************************************************************/
1389 u8 devad, u16 reg, u16 val) 1354/* CL45 access functions */
1355/******************************************************************/
1356static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1357 u8 devad, u16 reg, u16 val)
1390{ 1358{
1391 u32 tmp, saved_mode; 1359 u32 tmp, saved_mode;
1392 u8 i, rc = 0; 1360 u8 i, rc = 0;
1393 1361 /*
1394 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 1362 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1395 * (a value of 49==0x31) and make sure that the AUTO poll is off 1363 * (a value of 49==0x31) and make sure that the AUTO poll is off
1396 */ 1364 */
1397 1365
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1414 for (i = 0; i < 50; i++) { 1382 for (i = 0; i < 50; i++) {
1415 udelay(10); 1383 udelay(10);
1416 1384
1417 tmp = REG_RD(bp, phy->mdio_ctrl + 1385 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1418 EMAC_REG_EMAC_MDIO_COMM);
1419 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1386 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1420 udelay(5); 1387 udelay(5);
1421 break; 1388 break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1423 } 1390 }
1424 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1391 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1425 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1392 DP(NETIF_MSG_LINK, "write phy register failed\n");
1393 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1426 rc = -EFAULT; 1394 rc = -EFAULT;
1427 } else { 1395 } else {
1428 /* data */ 1396 /* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1435 udelay(10); 1403 udelay(10);
1436 1404
1437 tmp = REG_RD(bp, phy->mdio_ctrl + 1405 tmp = REG_RD(bp, phy->mdio_ctrl +
1438 EMAC_REG_EMAC_MDIO_COMM); 1406 EMAC_REG_EMAC_MDIO_COMM);
1439 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1407 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1440 udelay(5); 1408 udelay(5);
1441 break; 1409 break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1443 } 1411 }
1444 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1412 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1445 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1413 DP(NETIF_MSG_LINK, "write phy register failed\n");
1414 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1446 rc = -EFAULT; 1415 rc = -EFAULT;
1447 } 1416 }
1448 } 1417 }
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1453 return rc; 1422 return rc;
1454} 1423}
1455 1424
1456u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, 1425static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1457 u8 devad, u16 reg, u16 *ret_val) 1426 u8 devad, u16 reg, u16 *ret_val)
1458{ 1427{
1459 u32 val, saved_mode; 1428 u32 val, saved_mode;
1460 u16 i; 1429 u16 i;
1461 u8 rc = 0; 1430 u8 rc = 0;
1462 1431 /*
1463 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 1432 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1464 * (a value of 49==0x31) and make sure that the AUTO poll is off 1433 * (a value of 49==0x31) and make sure that the AUTO poll is off
1465 */ 1434 */
1466 1435
1467 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 1436 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1468 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL | 1437 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
1469 EMAC_MDIO_MODE_CLOCK_CNT)); 1438 EMAC_MDIO_MODE_CLOCK_CNT));
1470 val |= (EMAC_MDIO_MODE_CLAUSE_45 | 1439 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1471 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 1440 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1472 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); 1441 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1490 } 1459 }
1491 if (val & EMAC_MDIO_COMM_START_BUSY) { 1460 if (val & EMAC_MDIO_COMM_START_BUSY) {
1492 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1461 DP(NETIF_MSG_LINK, "read phy register failed\n");
1493 1462 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1494 *ret_val = 0; 1463 *ret_val = 0;
1495 rc = -EFAULT; 1464 rc = -EFAULT;
1496 1465
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1505 udelay(10); 1474 udelay(10);
1506 1475
1507 val = REG_RD(bp, phy->mdio_ctrl + 1476 val = REG_RD(bp, phy->mdio_ctrl +
1508 EMAC_REG_EMAC_MDIO_COMM); 1477 EMAC_REG_EMAC_MDIO_COMM);
1509 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1478 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1510 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 1479 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
1511 break; 1480 break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1513 } 1482 }
1514 if (val & EMAC_MDIO_COMM_START_BUSY) { 1483 if (val & EMAC_MDIO_COMM_START_BUSY) {
1515 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1484 DP(NETIF_MSG_LINK, "read phy register failed\n");
1516 1485 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1517 *ret_val = 0; 1486 *ret_val = 0;
1518 rc = -EFAULT; 1487 rc = -EFAULT;
1519 } 1488 }
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
1529 u8 devad, u16 reg, u16 *ret_val) 1498 u8 devad, u16 reg, u16 *ret_val)
1530{ 1499{
1531 u8 phy_index; 1500 u8 phy_index;
1532 /** 1501 /*
1533 * Probe for the phy according to the given phy_addr, and execute 1502 * Probe for the phy according to the given phy_addr, and execute
1534 * the read request on it 1503 * the read request on it
1535 */ 1504 */
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
1547 u8 devad, u16 reg, u16 val) 1516 u8 devad, u16 reg, u16 val)
1548{ 1517{
1549 u8 phy_index; 1518 u8 phy_index;
1550 /** 1519 /*
1551 * Probe for the phy according to the given phy_addr, and execute 1520 * Probe for the phy according to the given phy_addr, and execute
1552 * the write request on it 1521 * the write request on it
1553 */ 1522 */
@@ -1576,16 +1545,15 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
1576 aer_val = 0x3800 + offset - 1; 1545 aer_val = 0x3800 + offset - 1;
1577 else 1546 else
1578 aer_val = 0x3800 + offset; 1547 aer_val = 0x3800 + offset;
1579 CL45_WR_OVER_CL22(bp, phy, 1548 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
1580 MDIO_REG_BANK_AER_BLOCK, 1549 MDIO_AER_BLOCK_AER_REG, aer_val);
1581 MDIO_AER_BLOCK_AER_REG, aer_val);
1582} 1550}
1583static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp, 1551static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
1584 struct bnx2x_phy *phy) 1552 struct bnx2x_phy *phy)
1585{ 1553{
1586 CL45_WR_OVER_CL22(bp, phy, 1554 CL22_WR_OVER_CL45(bp, phy,
1587 MDIO_REG_BANK_AER_BLOCK, 1555 MDIO_REG_BANK_AER_BLOCK,
1588 MDIO_AER_BLOCK_AER_REG, 0x3800); 1556 MDIO_AER_BLOCK_AER_REG, 0x3800);
1589} 1557}
1590 1558
1591/******************************************************************/ 1559/******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
1621 1589
1622 bnx2x_set_serdes_access(bp, port); 1590 bnx2x_set_serdes_access(bp, port);
1623 1591
1624 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + 1592 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
1625 port*0x10, 1593 DEFAULT_PHY_DEV_ADDR);
1626 DEFAULT_PHY_DEV_ADDR);
1627} 1594}
1628 1595
1629static void bnx2x_xgxs_deassert(struct link_params *params) 1596static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
1641 udelay(500); 1608 udelay(500);
1642 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 1609 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
1643 1610
1644 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + 1611 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
1645 port*0x18, 0);
1646 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 1612 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
1647 params->phy[INT_PHY].def_md_devad); 1613 params->phy[INT_PHY].def_md_devad);
1648} 1614}
1649 1615
1650 1616
1651void bnx2x_link_status_update(struct link_params *params, 1617void bnx2x_link_status_update(struct link_params *params,
1652 struct link_vars *vars) 1618 struct link_vars *vars)
1653{ 1619{
1654 struct bnx2x *bp = params->bp; 1620 struct bnx2x *bp = params->bp;
1655 u8 link_10g; 1621 u8 link_10g;
1656 u8 port = params->port; 1622 u8 port = params->port;
1657 1623
1658 vars->link_status = REG_RD(bp, params->shmem_base + 1624 vars->link_status = REG_RD(bp, params->shmem_base +
1659 offsetof(struct shmem_region, 1625 offsetof(struct shmem_region,
1660 port_mb[port].link_status)); 1626 port_mb[port].link_status));
1661 1627
1662 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); 1628 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
1663 1629
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
1667 vars->phy_link_up = 1; 1633 vars->phy_link_up = 1;
1668 vars->duplex = DUPLEX_FULL; 1634 vars->duplex = DUPLEX_FULL;
1669 switch (vars->link_status & 1635 switch (vars->link_status &
1670 LINK_STATUS_SPEED_AND_DUPLEX_MASK) { 1636 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
1671 case LINK_10THD: 1637 case LINK_10THD:
1672 vars->duplex = DUPLEX_HALF; 1638 vars->duplex = DUPLEX_HALF;
1673 /* fall thru */ 1639 /* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
1779{ 1745{
1780 struct bnx2x *bp = params->bp; 1746 struct bnx2x *bp = params->bp;
1781 u16 new_master_ln, ser_lane; 1747 u16 new_master_ln, ser_lane;
1782 ser_lane = ((params->lane_config & 1748 ser_lane = ((params->lane_config &
1783 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1749 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1784 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1750 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1785 1751
1786 /* set the master_ln for AN */ 1752 /* set the master_ln for AN */
1787 CL45_RD_OVER_CL22(bp, phy, 1753 CL22_RD_OVER_CL45(bp, phy,
1788 MDIO_REG_BANK_XGXS_BLOCK2, 1754 MDIO_REG_BANK_XGXS_BLOCK2,
1789 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1755 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1790 &new_master_ln); 1756 &new_master_ln);
1791 1757
1792 CL45_WR_OVER_CL22(bp, phy, 1758 CL22_WR_OVER_CL45(bp, phy,
1793 MDIO_REG_BANK_XGXS_BLOCK2 , 1759 MDIO_REG_BANK_XGXS_BLOCK2 ,
1794 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1760 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1795 (new_master_ln | ser_lane)); 1761 (new_master_ln | ser_lane));
1796} 1762}
1797 1763
1798static u8 bnx2x_reset_unicore(struct link_params *params, 1764static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1802 struct bnx2x *bp = params->bp; 1768 struct bnx2x *bp = params->bp;
1803 u16 mii_control; 1769 u16 mii_control;
1804 u16 i; 1770 u16 i;
1805 1771 CL22_RD_OVER_CL45(bp, phy,
1806 CL45_RD_OVER_CL22(bp, phy, 1772 MDIO_REG_BANK_COMBO_IEEE0,
1807 MDIO_REG_BANK_COMBO_IEEE0, 1773 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1808 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1809 1774
1810 /* reset the unicore */ 1775 /* reset the unicore */
1811 CL45_WR_OVER_CL22(bp, phy, 1776 CL22_WR_OVER_CL45(bp, phy,
1812 MDIO_REG_BANK_COMBO_IEEE0, 1777 MDIO_REG_BANK_COMBO_IEEE0,
1813 MDIO_COMBO_IEEE0_MII_CONTROL, 1778 MDIO_COMBO_IEEE0_MII_CONTROL,
1814 (mii_control | 1779 (mii_control |
1815 MDIO_COMBO_IEEO_MII_CONTROL_RESET)); 1780 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1816 if (set_serdes) 1781 if (set_serdes)
1817 bnx2x_set_serdes_access(bp, params->port); 1782 bnx2x_set_serdes_access(bp, params->port);
1818 1783
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1821 udelay(5); 1786 udelay(5);
1822 1787
1823 /* the reset erased the previous bank value */ 1788 /* the reset erased the previous bank value */
1824 CL45_RD_OVER_CL22(bp, phy, 1789 CL22_RD_OVER_CL45(bp, phy,
1825 MDIO_REG_BANK_COMBO_IEEE0, 1790 MDIO_REG_BANK_COMBO_IEEE0,
1826 MDIO_COMBO_IEEE0_MII_CONTROL, 1791 MDIO_COMBO_IEEE0_MII_CONTROL,
1827 &mii_control); 1792 &mii_control);
1828 1793
1829 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { 1794 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
1830 udelay(5); 1795 udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1832 } 1797 }
1833 } 1798 }
1834 1799
1800 netdev_err(bp->dev, "Warning: PHY was not initialized,"
1801 " Port %d\n",
1802 params->port);
1835 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); 1803 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
1836 return -EINVAL; 1804 return -EINVAL;
1837 1805
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
1841 struct bnx2x_phy *phy) 1809 struct bnx2x_phy *phy)
1842{ 1810{
1843 struct bnx2x *bp = params->bp; 1811 struct bnx2x *bp = params->bp;
1844 /* Each two bits represents a lane number: 1812 /*
1845 No swap is 0123 => 0x1b no need to enable the swap */ 1813 * Each two bits represents a lane number:
1814 * No swap is 0123 => 0x1b no need to enable the swap
1815 */
1846 u16 ser_lane, rx_lane_swap, tx_lane_swap; 1816 u16 ser_lane, rx_lane_swap, tx_lane_swap;
1847 1817
1848 ser_lane = ((params->lane_config & 1818 ser_lane = ((params->lane_config &
1849 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1819 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1850 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1820 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1851 rx_lane_swap = ((params->lane_config & 1821 rx_lane_swap = ((params->lane_config &
1852 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> 1822 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
1853 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); 1823 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
1854 tx_lane_swap = ((params->lane_config & 1824 tx_lane_swap = ((params->lane_config &
1855 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> 1825 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
1856 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); 1826 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1857 1827
1858 if (rx_lane_swap != 0x1b) { 1828 if (rx_lane_swap != 0x1b) {
1859 CL45_WR_OVER_CL22(bp, phy, 1829 CL22_WR_OVER_CL45(bp, phy,
1860 MDIO_REG_BANK_XGXS_BLOCK2, 1830 MDIO_REG_BANK_XGXS_BLOCK2,
1861 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 1831 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1862 (rx_lane_swap | 1832 (rx_lane_swap |
1863 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | 1833 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1864 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); 1834 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1865 } else { 1835 } else {
1866 CL45_WR_OVER_CL22(bp, phy, 1836 CL22_WR_OVER_CL45(bp, phy,
1867 MDIO_REG_BANK_XGXS_BLOCK2, 1837 MDIO_REG_BANK_XGXS_BLOCK2,
1868 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); 1838 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1869 } 1839 }
1870 1840
1871 if (tx_lane_swap != 0x1b) { 1841 if (tx_lane_swap != 0x1b) {
1872 CL45_WR_OVER_CL22(bp, phy, 1842 CL22_WR_OVER_CL45(bp, phy,
1873 MDIO_REG_BANK_XGXS_BLOCK2, 1843 MDIO_REG_BANK_XGXS_BLOCK2,
1874 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 1844 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1875 (tx_lane_swap | 1845 (tx_lane_swap |
1876 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); 1846 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1877 } else { 1847 } else {
1878 CL45_WR_OVER_CL22(bp, phy, 1848 CL22_WR_OVER_CL45(bp, phy,
1879 MDIO_REG_BANK_XGXS_BLOCK2, 1849 MDIO_REG_BANK_XGXS_BLOCK2,
1880 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); 1850 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1881 } 1851 }
1882} 1852}
1883 1853
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
1886{ 1856{
1887 struct bnx2x *bp = params->bp; 1857 struct bnx2x *bp = params->bp;
1888 u16 control2; 1858 u16 control2;
1889 CL45_RD_OVER_CL22(bp, phy, 1859 CL22_RD_OVER_CL45(bp, phy,
1890 MDIO_REG_BANK_SERDES_DIGITAL, 1860 MDIO_REG_BANK_SERDES_DIGITAL,
1891 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1861 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1892 &control2); 1862 &control2);
1893 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1863 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1894 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1864 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1895 else 1865 else
1896 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1866 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1897 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", 1867 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1898 phy->speed_cap_mask, control2); 1868 phy->speed_cap_mask, control2);
1899 CL45_WR_OVER_CL22(bp, phy, 1869 CL22_WR_OVER_CL45(bp, phy,
1900 MDIO_REG_BANK_SERDES_DIGITAL, 1870 MDIO_REG_BANK_SERDES_DIGITAL,
1901 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1871 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1902 control2); 1872 control2);
1903 1873
1904 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 1874 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
1905 (phy->speed_cap_mask & 1875 (phy->speed_cap_mask &
1906 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 1876 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1907 DP(NETIF_MSG_LINK, "XGXS\n"); 1877 DP(NETIF_MSG_LINK, "XGXS\n");
1908 1878
1909 CL45_WR_OVER_CL22(bp, phy, 1879 CL22_WR_OVER_CL45(bp, phy,
1910 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1880 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1911 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 1881 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1912 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); 1882 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1913 1883
1914 CL45_RD_OVER_CL22(bp, phy, 1884 CL22_RD_OVER_CL45(bp, phy,
1915 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1885 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1916 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1886 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1917 &control2); 1887 &control2);
1918 1888
1919 1889
1920 control2 |= 1890 control2 |=
1921 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 1891 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1922 1892
1923 CL45_WR_OVER_CL22(bp, phy, 1893 CL22_WR_OVER_CL45(bp, phy,
1924 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1894 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1925 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1895 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1926 control2); 1896 control2);
1927 1897
1928 /* Disable parallel detection of HiG */ 1898 /* Disable parallel detection of HiG */
1929 CL45_WR_OVER_CL22(bp, phy, 1899 CL22_WR_OVER_CL45(bp, phy,
1930 MDIO_REG_BANK_XGXS_BLOCK2, 1900 MDIO_REG_BANK_XGXS_BLOCK2,
1931 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, 1901 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1932 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | 1902 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
1933 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); 1903 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
1934 } 1904 }
1935} 1905}
1936 1906
1937static void bnx2x_set_autoneg(struct bnx2x_phy *phy, 1907static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1938 struct link_params *params, 1908 struct link_params *params,
1939 struct link_vars *vars, 1909 struct link_vars *vars,
1940 u8 enable_cl73) 1910 u8 enable_cl73)
1941{ 1911{
1942 struct bnx2x *bp = params->bp; 1912 struct bnx2x *bp = params->bp;
1943 u16 reg_val; 1913 u16 reg_val;
1944 1914
1945 /* CL37 Autoneg */ 1915 /* CL37 Autoneg */
1946 CL45_RD_OVER_CL22(bp, phy, 1916 CL22_RD_OVER_CL45(bp, phy,
1947 MDIO_REG_BANK_COMBO_IEEE0, 1917 MDIO_REG_BANK_COMBO_IEEE0,
1948 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1918 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1949 1919
1950 /* CL37 Autoneg Enabled */ 1920 /* CL37 Autoneg Enabled */
1951 if (vars->line_speed == SPEED_AUTO_NEG) 1921 if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1954 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1924 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1955 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); 1925 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1956 1926
1957 CL45_WR_OVER_CL22(bp, phy, 1927 CL22_WR_OVER_CL45(bp, phy,
1958 MDIO_REG_BANK_COMBO_IEEE0, 1928 MDIO_REG_BANK_COMBO_IEEE0,
1959 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 1929 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1960 1930
1961 /* Enable/Disable Autodetection */ 1931 /* Enable/Disable Autodetection */
1962 1932
1963 CL45_RD_OVER_CL22(bp, phy, 1933 CL22_RD_OVER_CL45(bp, phy,
1964 MDIO_REG_BANK_SERDES_DIGITAL, 1934 MDIO_REG_BANK_SERDES_DIGITAL,
1965 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1935 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1966 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | 1936 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
1967 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); 1937 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
1968 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; 1938 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1971 else 1941 else
1972 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1942 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1973 1943
1974 CL45_WR_OVER_CL22(bp, phy, 1944 CL22_WR_OVER_CL45(bp, phy,
1975 MDIO_REG_BANK_SERDES_DIGITAL, 1945 MDIO_REG_BANK_SERDES_DIGITAL,
1976 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); 1946 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1977 1947
1978 /* Enable TetonII and BAM autoneg */ 1948 /* Enable TetonII and BAM autoneg */
1979 CL45_RD_OVER_CL22(bp, phy, 1949 CL22_RD_OVER_CL45(bp, phy,
1980 MDIO_REG_BANK_BAM_NEXT_PAGE, 1950 MDIO_REG_BANK_BAM_NEXT_PAGE,
1981 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1951 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1982 &reg_val); 1952 &reg_val);
1983 if (vars->line_speed == SPEED_AUTO_NEG) { 1953 if (vars->line_speed == SPEED_AUTO_NEG) {
1984 /* Enable BAM aneg Mode and TetonII aneg Mode */ 1954 /* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1989 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1959 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1990 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1960 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1991 } 1961 }
1992 CL45_WR_OVER_CL22(bp, phy, 1962 CL22_WR_OVER_CL45(bp, phy,
1993 MDIO_REG_BANK_BAM_NEXT_PAGE, 1963 MDIO_REG_BANK_BAM_NEXT_PAGE,
1994 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1964 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1995 reg_val); 1965 reg_val);
1996 1966
1997 if (enable_cl73) { 1967 if (enable_cl73) {
1998 /* Enable Cl73 FSM status bits */ 1968 /* Enable Cl73 FSM status bits */
1999 CL45_WR_OVER_CL22(bp, phy, 1969 CL22_WR_OVER_CL45(bp, phy,
2000 MDIO_REG_BANK_CL73_USERB0, 1970 MDIO_REG_BANK_CL73_USERB0,
2001 MDIO_CL73_USERB0_CL73_UCTRL, 1971 MDIO_CL73_USERB0_CL73_UCTRL,
2002 0xe); 1972 0xe);
2003 1973
2004 /* Enable BAM Station Manager*/ 1974 /* Enable BAM Station Manager*/
2005 CL45_WR_OVER_CL22(bp, phy, 1975 CL22_WR_OVER_CL45(bp, phy,
2006 MDIO_REG_BANK_CL73_USERB0, 1976 MDIO_REG_BANK_CL73_USERB0,
2007 MDIO_CL73_USERB0_CL73_BAM_CTRL1, 1977 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2008 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | 1978 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2010 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1980 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
2011 1981
2012 /* Advertise CL73 link speeds */ 1982 /* Advertise CL73 link speeds */
2013 CL45_RD_OVER_CL22(bp, phy, 1983 CL22_RD_OVER_CL45(bp, phy,
2014 MDIO_REG_BANK_CL73_IEEEB1, 1984 MDIO_REG_BANK_CL73_IEEEB1,
2015 MDIO_CL73_IEEEB1_AN_ADV2, 1985 MDIO_CL73_IEEEB1_AN_ADV2,
2016 &reg_val); 1986 &reg_val);
2017 if (phy->speed_cap_mask & 1987 if (phy->speed_cap_mask &
2018 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1988 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2019 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; 1989 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2021 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1991 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
2022 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; 1992 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2023 1993
2024 CL45_WR_OVER_CL22(bp, phy, 1994 CL22_WR_OVER_CL45(bp, phy,
2025 MDIO_REG_BANK_CL73_IEEEB1, 1995 MDIO_REG_BANK_CL73_IEEEB1,
2026 MDIO_CL73_IEEEB1_AN_ADV2, 1996 MDIO_CL73_IEEEB1_AN_ADV2,
2027 reg_val); 1997 reg_val);
2028 1998
2029 /* CL73 Autoneg Enabled */ 1999 /* CL73 Autoneg Enabled */
2030 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; 2000 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2032 } else /* CL73 Autoneg Disabled */ 2002 } else /* CL73 Autoneg Disabled */
2033 reg_val = 0; 2003 reg_val = 0;
2034 2004
2035 CL45_WR_OVER_CL22(bp, phy, 2005 CL22_WR_OVER_CL45(bp, phy,
2036 MDIO_REG_BANK_CL73_IEEEB0, 2006 MDIO_REG_BANK_CL73_IEEEB0,
2037 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); 2007 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2038} 2008}
2039 2009
2040/* program SerDes, forced speed */ 2010/* program SerDes, forced speed */
2041static void bnx2x_program_serdes(struct bnx2x_phy *phy, 2011static void bnx2x_program_serdes(struct bnx2x_phy *phy,
2042 struct link_params *params, 2012 struct link_params *params,
2043 struct link_vars *vars) 2013 struct link_vars *vars)
2044{ 2014{
2045 struct bnx2x *bp = params->bp; 2015 struct bnx2x *bp = params->bp;
2046 u16 reg_val; 2016 u16 reg_val;
2047 2017
2048 /* program duplex, disable autoneg and sgmii*/ 2018 /* program duplex, disable autoneg and sgmii*/
2049 CL45_RD_OVER_CL22(bp, phy, 2019 CL22_RD_OVER_CL45(bp, phy,
2050 MDIO_REG_BANK_COMBO_IEEE0, 2020 MDIO_REG_BANK_COMBO_IEEE0,
2051 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 2021 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2052 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | 2022 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2053 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2023 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2054 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); 2024 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
2055 if (phy->req_duplex == DUPLEX_FULL) 2025 if (phy->req_duplex == DUPLEX_FULL)
2056 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2026 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2057 CL45_WR_OVER_CL22(bp, phy, 2027 CL22_WR_OVER_CL45(bp, phy,
2058 MDIO_REG_BANK_COMBO_IEEE0, 2028 MDIO_REG_BANK_COMBO_IEEE0,
2059 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 2029 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2060 2030
2061 /* program speed 2031 /*
2062 - needed only if the speed is greater than 1G (2.5G or 10G) */ 2032 * program speed
2063 CL45_RD_OVER_CL22(bp, phy, 2033 * - needed only if the speed is greater than 1G (2.5G or 10G)
2064 MDIO_REG_BANK_SERDES_DIGITAL, 2034 */
2065 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 2035 CL22_RD_OVER_CL45(bp, phy,
2036 MDIO_REG_BANK_SERDES_DIGITAL,
2037 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2066 /* clearing the speed value before setting the right speed */ 2038 /* clearing the speed value before setting the right speed */
2067 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); 2039 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
2068 2040
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
2083 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 2055 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
2084 } 2056 }
2085 2057
2086 CL45_WR_OVER_CL22(bp, phy, 2058 CL22_WR_OVER_CL45(bp, phy,
2087 MDIO_REG_BANK_SERDES_DIGITAL, 2059 MDIO_REG_BANK_SERDES_DIGITAL,
2088 MDIO_SERDES_DIGITAL_MISC1, reg_val); 2060 MDIO_SERDES_DIGITAL_MISC1, reg_val);
2089 2061
2090} 2062}
2091 2063
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
2102 val |= MDIO_OVER_1G_UP1_2_5G; 2074 val |= MDIO_OVER_1G_UP1_2_5G;
2103 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2075 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2104 val |= MDIO_OVER_1G_UP1_10G; 2076 val |= MDIO_OVER_1G_UP1_10G;
2105 CL45_WR_OVER_CL22(bp, phy, 2077 CL22_WR_OVER_CL45(bp, phy,
2106 MDIO_REG_BANK_OVER_1G, 2078 MDIO_REG_BANK_OVER_1G,
2107 MDIO_OVER_1G_UP1, val); 2079 MDIO_OVER_1G_UP1, val);
2108 2080
2109 CL45_WR_OVER_CL22(bp, phy, 2081 CL22_WR_OVER_CL45(bp, phy,
2110 MDIO_REG_BANK_OVER_1G, 2082 MDIO_REG_BANK_OVER_1G,
2111 MDIO_OVER_1G_UP3, 0x400); 2083 MDIO_OVER_1G_UP3, 0x400);
2112} 2084}
2113 2085
2114static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, 2086static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2116{ 2088{
2117 struct bnx2x *bp = params->bp; 2089 struct bnx2x *bp = params->bp;
2118 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 2090 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2119 /* resolve pause mode and advertisement 2091 /*
2120 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 2092 * Resolve pause mode and advertisement.
2093 * Please refer to Table 28B-3 of the 802.3ab-1999 spec
2094 */
2121 2095
2122 switch (phy->req_flow_ctrl) { 2096 switch (phy->req_flow_ctrl) {
2123 case BNX2X_FLOW_CTRL_AUTO: 2097 case BNX2X_FLOW_CTRL_AUTO:
2124 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { 2098 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
2125 *ieee_fc |= 2099 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2126 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 2100 else
2127 } else {
2128 *ieee_fc |= 2101 *ieee_fc |=
2129 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 2102 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2130 }
2131 break; 2103 break;
2132 case BNX2X_FLOW_CTRL_TX: 2104 case BNX2X_FLOW_CTRL_TX:
2133 *ieee_fc |= 2105 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2134 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2135 break; 2106 break;
2136 2107
2137 case BNX2X_FLOW_CTRL_RX: 2108 case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2149 2120
2150static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy, 2121static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
2151 struct link_params *params, 2122 struct link_params *params,
2152 u16 ieee_fc) 2123 u16 ieee_fc)
2153{ 2124{
2154 struct bnx2x *bp = params->bp; 2125 struct bnx2x *bp = params->bp;
2155 u16 val; 2126 u16 val;
2156 /* for AN, we are always publishing full duplex */ 2127 /* for AN, we are always publishing full duplex */
2157 2128
2158 CL45_WR_OVER_CL22(bp, phy, 2129 CL22_WR_OVER_CL45(bp, phy,
2159 MDIO_REG_BANK_COMBO_IEEE0, 2130 MDIO_REG_BANK_COMBO_IEEE0,
2160 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 2131 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
2161 CL45_RD_OVER_CL22(bp, phy, 2132 CL22_RD_OVER_CL45(bp, phy,
2162 MDIO_REG_BANK_CL73_IEEEB1, 2133 MDIO_REG_BANK_CL73_IEEEB1,
2163 MDIO_CL73_IEEEB1_AN_ADV1, &val); 2134 MDIO_CL73_IEEEB1_AN_ADV1, &val);
2164 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; 2135 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
2165 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); 2136 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
2166 CL45_WR_OVER_CL22(bp, phy, 2137 CL22_WR_OVER_CL45(bp, phy,
2167 MDIO_REG_BANK_CL73_IEEEB1, 2138 MDIO_REG_BANK_CL73_IEEEB1,
2168 MDIO_CL73_IEEEB1_AN_ADV1, val); 2139 MDIO_CL73_IEEEB1_AN_ADV1, val);
2169} 2140}
2170 2141
2171static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, 2142static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
2179 /* Enable and restart BAM/CL37 aneg */ 2150 /* Enable and restart BAM/CL37 aneg */
2180 2151
2181 if (enable_cl73) { 2152 if (enable_cl73) {
2182 CL45_RD_OVER_CL22(bp, phy, 2153 CL22_RD_OVER_CL45(bp, phy,
2183 MDIO_REG_BANK_CL73_IEEEB0, 2154 MDIO_REG_BANK_CL73_IEEEB0,
2184 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2155 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2185 &mii_control); 2156 &mii_control);
2186 2157
2187 CL45_WR_OVER_CL22(bp, phy, 2158 CL22_WR_OVER_CL45(bp, phy,
2188 MDIO_REG_BANK_CL73_IEEEB0, 2159 MDIO_REG_BANK_CL73_IEEEB0,
2189 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2160 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2190 (mii_control | 2161 (mii_control |
2191 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | 2162 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
2192 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); 2163 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
2193 } else { 2164 } else {
2194 2165
2195 CL45_RD_OVER_CL22(bp, phy, 2166 CL22_RD_OVER_CL45(bp, phy,
2196 MDIO_REG_BANK_COMBO_IEEE0, 2167 MDIO_REG_BANK_COMBO_IEEE0,
2197 MDIO_COMBO_IEEE0_MII_CONTROL, 2168 MDIO_COMBO_IEEE0_MII_CONTROL,
2198 &mii_control); 2169 &mii_control);
2199 DP(NETIF_MSG_LINK, 2170 DP(NETIF_MSG_LINK,
2200 "bnx2x_restart_autoneg mii_control before = 0x%x\n", 2171 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
2201 mii_control); 2172 mii_control);
2202 CL45_WR_OVER_CL22(bp, phy, 2173 CL22_WR_OVER_CL45(bp, phy,
2203 MDIO_REG_BANK_COMBO_IEEE0, 2174 MDIO_REG_BANK_COMBO_IEEE0,
2204 MDIO_COMBO_IEEE0_MII_CONTROL, 2175 MDIO_COMBO_IEEE0_MII_CONTROL,
2205 (mii_control | 2176 (mii_control |
2206 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2177 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2207 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); 2178 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
2208 } 2179 }
2209} 2180}
2210 2181
2211static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, 2182static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2212 struct link_params *params, 2183 struct link_params *params,
2213 struct link_vars *vars) 2184 struct link_vars *vars)
2214{ 2185{
2215 struct bnx2x *bp = params->bp; 2186 struct bnx2x *bp = params->bp;
2216 u16 control1; 2187 u16 control1;
2217 2188
2218 /* in SGMII mode, the unicore is always slave */ 2189 /* in SGMII mode, the unicore is always slave */
2219 2190
2220 CL45_RD_OVER_CL22(bp, phy, 2191 CL22_RD_OVER_CL45(bp, phy,
2221 MDIO_REG_BANK_SERDES_DIGITAL, 2192 MDIO_REG_BANK_SERDES_DIGITAL,
2222 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2193 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2223 &control1); 2194 &control1);
2224 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; 2195 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
2225 /* set sgmii mode (and not fiber) */ 2196 /* set sgmii mode (and not fiber) */
2226 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 2197 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
2227 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 2198 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
2228 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 2199 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
2229 CL45_WR_OVER_CL22(bp, phy, 2200 CL22_WR_OVER_CL45(bp, phy,
2230 MDIO_REG_BANK_SERDES_DIGITAL, 2201 MDIO_REG_BANK_SERDES_DIGITAL,
2231 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2202 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2232 control1); 2203 control1);
2233 2204
2234 /* if forced speed */ 2205 /* if forced speed */
2235 if (!(vars->line_speed == SPEED_AUTO_NEG)) { 2206 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
2236 /* set speed, disable autoneg */ 2207 /* set speed, disable autoneg */
2237 u16 mii_control; 2208 u16 mii_control;
2238 2209
2239 CL45_RD_OVER_CL22(bp, phy, 2210 CL22_RD_OVER_CL45(bp, phy,
2240 MDIO_REG_BANK_COMBO_IEEE0, 2211 MDIO_REG_BANK_COMBO_IEEE0,
2241 MDIO_COMBO_IEEE0_MII_CONTROL, 2212 MDIO_COMBO_IEEE0_MII_CONTROL,
2242 &mii_control); 2213 &mii_control);
2243 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2214 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2244 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 2215 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
2245 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 2216 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2267 if (phy->req_duplex == DUPLEX_FULL) 2238 if (phy->req_duplex == DUPLEX_FULL)
2268 mii_control |= 2239 mii_control |=
2269 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2240 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2270 CL45_WR_OVER_CL22(bp, phy, 2241 CL22_WR_OVER_CL45(bp, phy,
2271 MDIO_REG_BANK_COMBO_IEEE0, 2242 MDIO_REG_BANK_COMBO_IEEE0,
2272 MDIO_COMBO_IEEE0_MII_CONTROL, 2243 MDIO_COMBO_IEEE0_MII_CONTROL,
2273 mii_control); 2244 mii_control);
2274 2245
2275 } else { /* AN mode */ 2246 } else { /* AN mode */
2276 /* enable and restart AN */ 2247 /* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2285 2256
2286static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 2257static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
2287{ /* LD LP */ 2258{ /* LD LP */
2288 switch (pause_result) { /* ASYM P ASYM P */ 2259 switch (pause_result) { /* ASYM P ASYM P */
2289 case 0xb: /* 1 0 1 1 */ 2260 case 0xb: /* 1 0 1 1 */
2290 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; 2261 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
2291 break; 2262 break;
2292 2263
2293 case 0xe: /* 1 1 1 0 */ 2264 case 0xe: /* 1 1 1 0 */
2294 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; 2265 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
2295 break; 2266 break;
2296 2267
2297 case 0x5: /* 0 1 0 1 */ 2268 case 0x5: /* 0 1 0 1 */
2298 case 0x7: /* 0 1 1 1 */ 2269 case 0x7: /* 0 1 1 1 */
2299 case 0xd: /* 1 1 0 1 */ 2270 case 0xd: /* 1 1 0 1 */
2300 case 0xf: /* 1 1 1 1 */ 2271 case 0xf: /* 1 1 1 1 */
2301 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; 2272 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
2302 break; 2273 break;
2303 2274
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
2317 u16 pd_10g, status2_1000x; 2288 u16 pd_10g, status2_1000x;
2318 if (phy->req_line_speed != SPEED_AUTO_NEG) 2289 if (phy->req_line_speed != SPEED_AUTO_NEG)
2319 return 0; 2290 return 0;
2320 CL45_RD_OVER_CL22(bp, phy, 2291 CL22_RD_OVER_CL45(bp, phy,
2321 MDIO_REG_BANK_SERDES_DIGITAL, 2292 MDIO_REG_BANK_SERDES_DIGITAL,
2322 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2293 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
2323 &status2_1000x); 2294 &status2_1000x);
2324 CL45_RD_OVER_CL22(bp, phy, 2295 CL22_RD_OVER_CL45(bp, phy,
2325 MDIO_REG_BANK_SERDES_DIGITAL, 2296 MDIO_REG_BANK_SERDES_DIGITAL,
2326 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2297 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
2327 &status2_1000x); 2298 &status2_1000x);
2328 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { 2299 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
2329 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", 2300 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
2330 params->port); 2301 params->port);
2331 return 1; 2302 return 1;
2332 } 2303 }
2333 2304
2334 CL45_RD_OVER_CL22(bp, phy, 2305 CL22_RD_OVER_CL45(bp, phy,
2335 MDIO_REG_BANK_10G_PARALLEL_DETECT, 2306 MDIO_REG_BANK_10G_PARALLEL_DETECT,
2336 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, 2307 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
2337 &pd_10g); 2308 &pd_10g);
2338 2309
2339 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { 2310 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
2340 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", 2311 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
2373 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 2344 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
2374 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { 2345 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
2375 2346
2376 CL45_RD_OVER_CL22(bp, phy, 2347 CL22_RD_OVER_CL45(bp, phy,
2377 MDIO_REG_BANK_CL73_IEEEB1, 2348 MDIO_REG_BANK_CL73_IEEEB1,
2378 MDIO_CL73_IEEEB1_AN_ADV1, 2349 MDIO_CL73_IEEEB1_AN_ADV1,
2379 &ld_pause); 2350 &ld_pause);
2380 CL45_RD_OVER_CL22(bp, phy, 2351 CL22_RD_OVER_CL45(bp, phy,
2381 MDIO_REG_BANK_CL73_IEEEB1, 2352 MDIO_REG_BANK_CL73_IEEEB1,
2382 MDIO_CL73_IEEEB1_AN_LP_ADV1, 2353 MDIO_CL73_IEEEB1_AN_LP_ADV1,
2383 &lp_pause); 2354 &lp_pause);
2384 pause_result = (ld_pause & 2355 pause_result = (ld_pause &
2385 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) 2356 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
2386 >> 8; 2357 >> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
2390 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", 2361 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
2391 pause_result); 2362 pause_result);
2392 } else { 2363 } else {
2393 CL45_RD_OVER_CL22(bp, phy, 2364 CL22_RD_OVER_CL45(bp, phy,
2394 MDIO_REG_BANK_COMBO_IEEE0, 2365 MDIO_REG_BANK_COMBO_IEEE0,
2395 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 2366 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
2396 &ld_pause); 2367 &ld_pause);
2397 CL45_RD_OVER_CL22(bp, phy, 2368 CL22_RD_OVER_CL45(bp, phy,
2398 MDIO_REG_BANK_COMBO_IEEE0, 2369 MDIO_REG_BANK_COMBO_IEEE0,
2399 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 2370 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
2400 &lp_pause); 2371 &lp_pause);
2401 pause_result = (ld_pause & 2372 pause_result = (ld_pause &
2402 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 2373 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
2403 pause_result |= (lp_pause & 2374 pause_result |= (lp_pause &
2404 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 2375 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
2405 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", 2376 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
2406 pause_result); 2377 pause_result);
2407 } 2378 }
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2417 u16 rx_status, ustat_val, cl37_fsm_recieved; 2388 u16 rx_status, ustat_val, cl37_fsm_recieved;
2418 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); 2389 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
2419 /* Step 1: Make sure signal is detected */ 2390 /* Step 1: Make sure signal is detected */
2420 CL45_RD_OVER_CL22(bp, phy, 2391 CL22_RD_OVER_CL45(bp, phy,
2421 MDIO_REG_BANK_RX0, 2392 MDIO_REG_BANK_RX0,
2422 MDIO_RX0_RX_STATUS, 2393 MDIO_RX0_RX_STATUS,
2423 &rx_status); 2394 &rx_status);
2424 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != 2395 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
2425 (MDIO_RX0_RX_STATUS_SIGDET)) { 2396 (MDIO_RX0_RX_STATUS_SIGDET)) {
2426 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 2397 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
2427 "rx_status(0x80b0) = 0x%x\n", rx_status); 2398 "rx_status(0x80b0) = 0x%x\n", rx_status);
2428 CL45_WR_OVER_CL22(bp, phy, 2399 CL22_WR_OVER_CL45(bp, phy,
2429 MDIO_REG_BANK_CL73_IEEEB0, 2400 MDIO_REG_BANK_CL73_IEEEB0,
2430 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2401 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2431 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); 2402 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
2432 return; 2403 return;
2433 } 2404 }
2434 /* Step 2: Check CL73 state machine */ 2405 /* Step 2: Check CL73 state machine */
2435 CL45_RD_OVER_CL22(bp, phy, 2406 CL22_RD_OVER_CL45(bp, phy,
2436 MDIO_REG_BANK_CL73_USERB0, 2407 MDIO_REG_BANK_CL73_USERB0,
2437 MDIO_CL73_USERB0_CL73_USTAT1, 2408 MDIO_CL73_USERB0_CL73_USTAT1,
2438 &ustat_val); 2409 &ustat_val);
2439 if ((ustat_val & 2410 if ((ustat_val &
2440 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | 2411 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
2441 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != 2412 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2445 "ustat_val(0x8371) = 0x%x\n", ustat_val); 2416 "ustat_val(0x8371) = 0x%x\n", ustat_val);
2446 return; 2417 return;
2447 } 2418 }
2448 /* Step 3: Check CL37 Message Pages received to indicate LP 2419 /*
2449 supports only CL37 */ 2420 * Step 3: Check CL37 Message Pages received to indicate LP
2450 CL45_RD_OVER_CL22(bp, phy, 2421 * supports only CL37
2451 MDIO_REG_BANK_REMOTE_PHY, 2422 */
2452 MDIO_REMOTE_PHY_MISC_RX_STATUS, 2423 CL22_RD_OVER_CL45(bp, phy,
2453 &cl37_fsm_recieved); 2424 MDIO_REG_BANK_REMOTE_PHY,
2425 MDIO_REMOTE_PHY_MISC_RX_STATUS,
2426 &cl37_fsm_recieved);
2454 if ((cl37_fsm_recieved & 2427 if ((cl37_fsm_recieved &
2455 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | 2428 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
2456 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != 2429 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2461 cl37_fsm_recieved); 2434 cl37_fsm_recieved);
2462 return; 2435 return;
2463 } 2436 }
2464 /* The combined cl37/cl73 fsm state information indicating that we are 2437 /*
2465 connected to a device which does not support cl73, but does support 2438 * The combined cl37/cl73 fsm state information indicating that
2466 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */ 2439 * we are connected to a device which does not support cl73, but
2440 * does support cl37 BAM. In this case we disable cl73 and
2441 * restart cl37 auto-neg
2442 */
2443
2467 /* Disable CL73 */ 2444 /* Disable CL73 */
2468 CL45_WR_OVER_CL22(bp, phy, 2445 CL22_WR_OVER_CL45(bp, phy,
2469 MDIO_REG_BANK_CL73_IEEEB0, 2446 MDIO_REG_BANK_CL73_IEEEB0,
2470 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2447 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2471 0); 2448 0);
2472 /* Restart CL37 autoneg */ 2449 /* Restart CL37 autoneg */
2473 bnx2x_restart_autoneg(phy, params, 0); 2450 bnx2x_restart_autoneg(phy, params, 0);
2474 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); 2451 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
2493 struct link_vars *vars) 2470 struct link_vars *vars)
2494{ 2471{
2495 struct bnx2x *bp = params->bp; 2472 struct bnx2x *bp = params->bp;
2496 u16 new_line_speed , gp_status; 2473 u16 new_line_speed, gp_status;
2497 u8 rc = 0; 2474 u8 rc = 0;
2498 2475
2499 /* Read gp_status */ 2476 /* Read gp_status */
2500 CL45_RD_OVER_CL22(bp, phy, 2477 CL22_RD_OVER_CL45(bp, phy,
2501 MDIO_REG_BANK_GP_STATUS, 2478 MDIO_REG_BANK_GP_STATUS,
2502 MDIO_GP_STATUS_TOP_AN_STATUS1, 2479 MDIO_GP_STATUS_TOP_AN_STATUS1,
2503 &gp_status); 2480 &gp_status);
2504 2481
2505 if (phy->req_line_speed == SPEED_AUTO_NEG) 2482 if (phy->req_line_speed == SPEED_AUTO_NEG)
2506 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; 2483 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2637 u16 bank; 2614 u16 bank;
2638 2615
2639 /* read precomp */ 2616 /* read precomp */
2640 CL45_RD_OVER_CL22(bp, phy, 2617 CL22_RD_OVER_CL45(bp, phy,
2641 MDIO_REG_BANK_OVER_1G, 2618 MDIO_REG_BANK_OVER_1G,
2642 MDIO_OVER_1G_LP_UP2, &lp_up2); 2619 MDIO_OVER_1G_LP_UP2, &lp_up2);
2643 2620
2644 /* bits [10:7] at lp_up2, positioned at [15:12] */ 2621 /* bits [10:7] at lp_up2, positioned at [15:12] */
2645 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> 2622 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2651 2628
2652 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; 2629 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
2653 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { 2630 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
2654 CL45_RD_OVER_CL22(bp, phy, 2631 CL22_RD_OVER_CL45(bp, phy,
2655 bank, 2632 bank,
2656 MDIO_TX0_TX_DRIVER, &tx_driver); 2633 MDIO_TX0_TX_DRIVER, &tx_driver);
2657 2634
2658 /* replace tx_driver bits [15:12] */ 2635 /* replace tx_driver bits [15:12] */
2659 if (lp_up2 != 2636 if (lp_up2 !=
2660 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 2637 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
2661 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 2638 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2662 tx_driver |= lp_up2; 2639 tx_driver |= lp_up2;
2663 CL45_WR_OVER_CL22(bp, phy, 2640 CL22_WR_OVER_CL45(bp, phy,
2664 bank, 2641 bank,
2665 MDIO_TX0_TX_DRIVER, tx_driver); 2642 MDIO_TX0_TX_DRIVER, tx_driver);
2666 } 2643 }
2667 } 2644 }
2668} 2645}
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
2676 2653
2677 DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); 2654 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2678 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + 2655 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
2679 EMAC_REG_EMAC_MODE, 2656 EMAC_REG_EMAC_MODE,
2680 (EMAC_MODE_25G_MODE | 2657 (EMAC_MODE_25G_MODE |
2681 EMAC_MODE_PORT_MII_10M | 2658 EMAC_MODE_PORT_MII_10M |
2682 EMAC_MODE_HALF_DUPLEX)); 2659 EMAC_MODE_HALF_DUPLEX));
2683 switch (vars->line_speed) { 2660 switch (vars->line_speed) {
2684 case SPEED_10: 2661 case SPEED_10:
2685 mode |= EMAC_MODE_PORT_MII_10M; 2662 mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
2707 if (vars->duplex == DUPLEX_HALF) 2684 if (vars->duplex == DUPLEX_HALF)
2708 mode |= EMAC_MODE_HALF_DUPLEX; 2685 mode |= EMAC_MODE_HALF_DUPLEX;
2709 bnx2x_bits_en(bp, 2686 bnx2x_bits_en(bp,
2710 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 2687 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2711 mode); 2688 mode);
2712 2689
2713 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 2690 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
2714 return 0; 2691 return 0;
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2723 2700
2724 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; 2701 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
2725 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { 2702 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
2726 CL45_WR_OVER_CL22(bp, phy, 2703 CL22_WR_OVER_CL45(bp, phy,
2727 bank, 2704 bank,
2728 MDIO_RX0_RX_EQ_BOOST, 2705 MDIO_RX0_RX_EQ_BOOST,
2729 phy->rx_preemphasis[i]); 2706 phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2731 2708
2732 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; 2709 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
2733 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { 2710 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
2734 CL45_WR_OVER_CL22(bp, phy, 2711 CL22_WR_OVER_CL45(bp, phy,
2735 bank, 2712 bank,
2736 MDIO_TX0_TX_DRIVER, 2713 MDIO_TX0_TX_DRIVER,
2737 phy->tx_preemphasis[i]); 2714 phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2754 /* forced speed requested? */ 2731 /* forced speed requested? */
2755 if (vars->line_speed != SPEED_AUTO_NEG || 2732 if (vars->line_speed != SPEED_AUTO_NEG ||
2756 (SINGLE_MEDIA_DIRECT(params) && 2733 (SINGLE_MEDIA_DIRECT(params) &&
2757 params->loopback_mode == LOOPBACK_EXT)) { 2734 params->loopback_mode == LOOPBACK_EXT)) {
2758 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 2735 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2759 2736
2760 /* disable autoneg */ 2737 /* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2771 2748
2772 /* program duplex & pause advertisement (for aneg) */ 2749 /* program duplex & pause advertisement (for aneg) */
2773 bnx2x_set_ieee_aneg_advertisment(phy, params, 2750 bnx2x_set_ieee_aneg_advertisment(phy, params,
2774 vars->ieee_fc); 2751 vars->ieee_fc);
2775 2752
2776 /* enable autoneg */ 2753 /* enable autoneg */
2777 bnx2x_set_autoneg(phy, params, vars, enable_cl73); 2754 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2842} 2819}
2843 2820
2844static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, 2821static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2845 struct bnx2x_phy *phy) 2822 struct bnx2x_phy *phy,
2823 struct link_params *params)
2846{ 2824{
2847 u16 cnt, ctrl; 2825 u16 cnt, ctrl;
2848 /* Wait for soft reset to get cleared upto 1 sec */ 2826 /* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2853 break; 2831 break;
2854 msleep(1); 2832 msleep(1);
2855 } 2833 }
2834
2835 if (cnt == 1000)
2836 netdev_err(bp->dev, "Warning: PHY was not initialized,"
2837 " Port %d\n",
2838 params->port);
2856 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); 2839 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
2857 return cnt; 2840 return cnt;
2858} 2841}
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
2863 u32 mask; 2846 u32 mask;
2864 struct bnx2x *bp = params->bp; 2847 struct bnx2x *bp = params->bp;
2865 2848
2866 /* setting the status to report on link up 2849 /* Setting the status to report on link up for either XGXS or SerDes */
2867 for either XGXS or SerDes */
2868
2869 if (params->switch_cfg == SWITCH_CFG_10G) { 2850 if (params->switch_cfg == SWITCH_CFG_10G) {
2870 mask = (NIG_MASK_XGXS0_LINK10G | 2851 mask = (NIG_MASK_XGXS0_LINK10G |
2871 NIG_MASK_XGXS0_LINK_STATUS); 2852 NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2908{ 2889{
2909 u32 latch_status = 0; 2890 u32 latch_status = 0;
2910 2891
2911 /** 2892 /*
2912 * Disable the MI INT ( external phy int ) by writing 1 to the 2893 * Disable the MI INT ( external phy int ) by writing 1 to the
2913 * status register. Link down indication is high-active-signal, 2894 * status register. Link down indication is high-active-signal,
2914 * so in this case we need to write the status to clear the XOR 2895 * so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2933 2914
2934 /* For all latched-signal=up : Re-Arm Latch signals */ 2915 /* For all latched-signal=up : Re-Arm Latch signals */
2935 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, 2916 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
2936 (latch_status & 0xfffe) | (latch_status & 1)); 2917 (latch_status & 0xfffe) | (latch_status & 1));
2937 } 2918 }
2938 /* For all latched-signal=up,Write original_signal to status */ 2919 /* For all latched-signal=up,Write original_signal to status */
2939} 2920}
2940 2921
2941static void bnx2x_link_int_ack(struct link_params *params, 2922static void bnx2x_link_int_ack(struct link_params *params,
2942 struct link_vars *vars, u8 is_10g) 2923 struct link_vars *vars, u8 is_10g)
2943{ 2924{
2944 struct bnx2x *bp = params->bp; 2925 struct bnx2x *bp = params->bp;
2945 u8 port = params->port; 2926 u8 port = params->port;
2946 2927
2947 /* first reset all status 2928 /*
2948 * we assume only one line will be change at a time */ 2929 * First reset all status we assume only one line will be
2930 * change at a time
2931 */
2949 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 2932 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2950 (NIG_STATUS_XGXS0_LINK10G | 2933 (NIG_STATUS_XGXS0_LINK10G |
2951 NIG_STATUS_XGXS0_LINK_STATUS | 2934 NIG_STATUS_XGXS0_LINK_STATUS |
2952 NIG_STATUS_SERDES0_LINK_STATUS)); 2935 NIG_STATUS_SERDES0_LINK_STATUS));
2953 if (vars->phy_link_up) { 2936 if (vars->phy_link_up) {
2954 if (is_10g) { 2937 if (is_10g) {
2955 /* Disable the 10G link interrupt 2938 /*
2956 * by writing 1 to the status register 2939 * Disable the 10G link interrupt by writing 1 to the
2940 * status register
2957 */ 2941 */
2958 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n"); 2942 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
2959 bnx2x_bits_en(bp, 2943 bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
2961 NIG_STATUS_XGXS0_LINK10G); 2945 NIG_STATUS_XGXS0_LINK10G);
2962 2946
2963 } else if (params->switch_cfg == SWITCH_CFG_10G) { 2947 } else if (params->switch_cfg == SWITCH_CFG_10G) {
2964 /* Disable the link interrupt 2948 /*
2965 * by writing 1 to the relevant lane 2949 * Disable the link interrupt by writing 1 to the
2966 * in the status register 2950 * relevant lane in the status register
2967 */ 2951 */
2968 u32 ser_lane = ((params->lane_config & 2952 u32 ser_lane = ((params->lane_config &
2969 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 2953 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
2978 2962
2979 } else { /* SerDes */ 2963 } else { /* SerDes */
2980 DP(NETIF_MSG_LINK, "SerDes phy link up\n"); 2964 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
2981 /* Disable the link interrupt 2965 /*
2982 * by writing 1 to the status register 2966 * Disable the link interrupt by writing 1 to the status
2967 * register
2983 */ 2968 */
2984 bnx2x_bits_en(bp, 2969 bnx2x_bits_en(bp,
2985 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 2970 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3059 } 3044 }
3060 if ((params->num_phys == MAX_PHYS) && 3045 if ((params->num_phys == MAX_PHYS) &&
3061 (params->phy[EXT_PHY2].ver_addr != 0)) { 3046 (params->phy[EXT_PHY2].ver_addr != 0)) {
3062 spirom_ver = REG_RD(bp, 3047 spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
3063 params->phy[EXT_PHY2].ver_addr);
3064 if (params->phy[EXT_PHY2].format_fw_ver) { 3048 if (params->phy[EXT_PHY2].format_fw_ver) {
3065 *ver_p = '/'; 3049 *ver_p = '/';
3066 ver_p++; 3050 ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
3089 3073
3090 /* change the uni_phy_addr in the nig */ 3074 /* change the uni_phy_addr in the nig */
3091 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + 3075 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
3092 port*0x18)); 3076 port*0x18));
3093 3077
3094 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); 3078 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3095 3079
3096 bnx2x_cl45_write(bp, phy, 3080 bnx2x_cl45_write(bp, phy,
3097 5, 3081 5,
3098 (MDIO_REG_BANK_AER_BLOCK + 3082 (MDIO_REG_BANK_AER_BLOCK +
3099 (MDIO_AER_BLOCK_AER_REG & 0xf)), 3083 (MDIO_AER_BLOCK_AER_REG & 0xf)),
3100 0x2800); 3084 0x2800);
3101 3085
3102 bnx2x_cl45_write(bp, phy, 3086 bnx2x_cl45_write(bp, phy,
3103 5, 3087 5,
3104 (MDIO_REG_BANK_CL73_IEEEB0 + 3088 (MDIO_REG_BANK_CL73_IEEEB0 +
3105 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 3089 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
3106 0x6041); 3090 0x6041);
3107 msleep(200); 3091 msleep(200);
3108 /* set aer mmd back */ 3092 /* set aer mmd back */
3109 bnx2x_set_aer_mmd_xgxs(params, phy); 3093 bnx2x_set_aer_mmd_xgxs(params, phy);
3110 3094
3111 /* and md_devad */ 3095 /* and md_devad */
3112 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 3096 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3113 md_devad);
3114
3115 } else { 3097 } else {
3116 u16 mii_ctrl; 3098 u16 mii_ctrl;
3117 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); 3099 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,26 +3134,26 @@ u8 bnx2x_set_led(struct link_params *params,
3152 case LED_MODE_OFF: 3134 case LED_MODE_OFF:
3153 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); 3135 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
3154 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3136 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
3155 SHARED_HW_CFG_LED_MAC1); 3137 SHARED_HW_CFG_LED_MAC1);
3156 3138
3157 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 3139 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3158 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); 3140 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
3159 break; 3141 break;
3160 3142
3161 case LED_MODE_OPER: 3143 case LED_MODE_OPER:
3162 /** 3144 /*
3163 * For all other phys, OPER mode is same as ON, so in case 3145 * For all other phys, OPER mode is same as ON, so in case
3164 * link is down, do nothing 3146 * link is down, do nothing
3165 **/ 3147 */
3166 if (!vars->link_up) 3148 if (!vars->link_up)
3167 break; 3149 break;
3168 case LED_MODE_ON: 3150 case LED_MODE_ON:
3169 if (params->phy[EXT_PHY1].type == 3151 if (params->phy[EXT_PHY1].type ==
3170 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 && 3152 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
3171 CHIP_IS_E2(bp) && params->num_phys == 2) { 3153 CHIP_IS_E2(bp) && params->num_phys == 2) {
3172 /** 3154 /*
3173 * This is a work-around for E2+8727 Configurations 3155 * This is a work-around for E2+8727 Configurations
3174 */ 3156 */
3175 if (mode == LED_MODE_ON || 3157 if (mode == LED_MODE_ON ||
3176 speed == SPEED_10000){ 3158 speed == SPEED_10000){
3177 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 3159 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -3183,41 +3165,40 @@ u8 bnx2x_set_led(struct link_params *params,
3183 return rc; 3165 return rc;
3184 } 3166 }
3185 } else if (SINGLE_MEDIA_DIRECT(params)) { 3167 } else if (SINGLE_MEDIA_DIRECT(params)) {
3186 /** 3168 /*
3187 * This is a work-around for HW issue found when link 3169 * This is a work-around for HW issue found when link
3188 * is up in CL73 3170 * is up in CL73
3189 */ 3171 */
3190 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 3172 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
3191 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 3173 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
3192 } else { 3174 } else {
3193 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3175 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
3194 hw_led_mode);
3195 } 3176 }
3196 3177
3197 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + 3178 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
3198 port*4, 0);
3199 /* Set blinking rate to ~15.9Hz */ 3179 /* Set blinking rate to ~15.9Hz */
3200 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, 3180 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
3201 LED_BLINK_RATE_VAL); 3181 LED_BLINK_RATE_VAL);
3202 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + 3182 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
3203 port*4, 1); 3183 port*4, 1);
3204 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 3184 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3205 EMAC_WR(bp, EMAC_REG_EMAC_LED, 3185 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
3206 (tmp & (~EMAC_LED_OVERRIDE)));
3207 3186
3208 if (CHIP_IS_E1(bp) && 3187 if (CHIP_IS_E1(bp) &&
3209 ((speed == SPEED_2500) || 3188 ((speed == SPEED_2500) ||
3210 (speed == SPEED_1000) || 3189 (speed == SPEED_1000) ||
3211 (speed == SPEED_100) || 3190 (speed == SPEED_100) ||
3212 (speed == SPEED_10))) { 3191 (speed == SPEED_10))) {
3213 /* On Everest 1 Ax chip versions for speeds less than 3192 /*
3214 10G LED scheme is different */ 3193 * On Everest 1 Ax chip versions for speeds less than
3194 * 10G LED scheme is different
3195 */
3215 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 3196 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
3216 + port*4, 1); 3197 + port*4, 1);
3217 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + 3198 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
3218 port*4, 0); 3199 port*4, 0);
3219 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + 3200 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
3220 port*4, 1); 3201 port*4, 1);
3221 } 3202 }
3222 break; 3203 break;
3223 3204
@@ -3231,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
3231 3212
3232} 3213}
3233 3214
3234/** 3215/*
3235 * This function comes to reflect the actual link state read DIRECTLY from the 3216 * This function comes to reflect the actual link state read DIRECTLY from the
3236 * HW 3217 * HW
3237 */ 3218 */
@@ -3243,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
3243 u8 ext_phy_link_up = 0, serdes_phy_type; 3224 u8 ext_phy_link_up = 0, serdes_phy_type;
3244 struct link_vars temp_vars; 3225 struct link_vars temp_vars;
3245 3226
3246 CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY], 3227 CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
3247 MDIO_REG_BANK_GP_STATUS, 3228 MDIO_REG_BANK_GP_STATUS,
3248 MDIO_GP_STATUS_TOP_AN_STATUS1, 3229 MDIO_GP_STATUS_TOP_AN_STATUS1,
3249 &gp_status); 3230 &gp_status);
3250 /* link is up only if both local phy and external phy are up */ 3231 /* link is up only if both local phy and external phy are up */
3251 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) 3232 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
3252 return -ESRCH; 3233 return -ESRCH;
@@ -3290,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3290 u8 rc = 0; 3271 u8 rc = 0;
3291 u8 phy_index, non_ext_phy; 3272 u8 phy_index, non_ext_phy;
3292 struct bnx2x *bp = params->bp; 3273 struct bnx2x *bp = params->bp;
3293 /** 3274 /*
3294 * In case of external phy existence, the line speed would be the 3275 * In case of external phy existence, the line speed would be the
3295 * line speed linked up by the external phy. In case it is direct 3276 * line speed linked up by the external phy. In case it is direct
3296 * only, then the line_speed during initialization will be 3277 * only, then the line_speed during initialization will be
3297 * equal to the req_line_speed 3278 * equal to the req_line_speed
3298 */ 3279 */
3299 vars->line_speed = params->phy[INT_PHY].req_line_speed; 3280 vars->line_speed = params->phy[INT_PHY].req_line_speed;
3300 3281
3301 /** 3282 /*
3302 * Initialize the internal phy in case this is a direct board 3283 * Initialize the internal phy in case this is a direct board
3303 * (no external phys), or this board has external phy which requires 3284 * (no external phys), or this board has external phy which requires
3304 * to first. 3285 * to first.
@@ -3326,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3326 if (!non_ext_phy) 3307 if (!non_ext_phy)
3327 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 3308 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3328 phy_index++) { 3309 phy_index++) {
3329 /** 3310 /*
3330 * No need to initialize second phy in case of first 3311 * No need to initialize second phy in case of first
3331 * phy only selection. In case of second phy, we do 3312 * phy only selection. In case of second phy, we do
3332 * need to initialize the first phy, since they are 3313 * need to initialize the first phy, since they are
3333 * connected. 3314 * connected.
3334 **/ 3315 */
3335 if (phy_index == EXT_PHY2 && 3316 if (phy_index == EXT_PHY2 &&
3336 (bnx2x_phy_selection(params) == 3317 (bnx2x_phy_selection(params) ==
3337 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { 3318 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
3338 DP(NETIF_MSG_LINK, "Not initializing" 3319 DP(NETIF_MSG_LINK, "Ignoring second phy\n");
3339 "second phy\n");
3340 continue; 3320 continue;
3341 } 3321 }
3342 params->phy[phy_index].config_init( 3322 params->phy[phy_index].config_init(
@@ -3358,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
3358 struct link_params *params) 3338 struct link_params *params)
3359{ 3339{
3360 /* reset the SerDes/XGXS */ 3340 /* reset the SerDes/XGXS */
3361 REG_WR(params->bp, GRCBASE_MISC + 3341 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3362 MISC_REGISTERS_RESET_REG_3_CLEAR, 3342 (0x1ff << (params->port*16)));
3363 (0x1ff << (params->port*16)));
3364} 3343}
3365 3344
3366static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, 3345static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3374,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
3374 else 3353 else
3375 gpio_port = params->port; 3354 gpio_port = params->port;
3376 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3355 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3377 MISC_REGISTERS_GPIO_OUTPUT_LOW, 3356 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3378 gpio_port); 3357 gpio_port);
3379 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 3358 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3380 MISC_REGISTERS_GPIO_OUTPUT_LOW, 3359 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3381 gpio_port); 3360 gpio_port);
3382 DP(NETIF_MSG_LINK, "reset external PHY\n"); 3361 DP(NETIF_MSG_LINK, "reset external PHY\n");
3383} 3362}
3384 3363
@@ -3409,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
3409 3388
3410 /* reset BigMac */ 3389 /* reset BigMac */
3411 bnx2x_bmac_rx_disable(bp, params->port); 3390 bnx2x_bmac_rx_disable(bp, params->port);
3412 REG_WR(bp, GRCBASE_MISC + 3391 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3413 MISC_REGISTERS_RESET_REG_2_CLEAR, 3392 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3414 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3415 return 0; 3393 return 0;
3416} 3394}
3417 3395
@@ -3462,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
3462 msleep(20); 3440 msleep(20);
3463 return rc; 3441 return rc;
3464} 3442}
3465/** 3443/*
3466 * The bnx2x_link_update function should be called upon link 3444 * The bnx2x_link_update function should be called upon link
3467 * interrupt. 3445 * interrupt.
3468 * Link is considered up as follows: 3446 * Link is considered up as follows:
@@ -3501,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3501 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); 3479 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
3502 3480
3503 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + 3481 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
3504 port*0x18) > 0); 3482 port*0x18) > 0);
3505 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n", 3483 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
3506 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), 3484 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3507 is_mi_int, 3485 is_mi_int,
3508 REG_RD(bp, 3486 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
3509 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
3510 3487
3511 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", 3488 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
3512 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 3489 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3515,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3515 /* disable emac */ 3492 /* disable emac */
3516 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 3493 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
3517 3494
3518 /** 3495 /*
3519 * Step 1: 3496 * Step 1:
3520 * Check external link change only for external phys, and apply 3497 * Check external link change only for external phys, and apply
3521 * priority selection between them in case the link on both phys 3498 * priority selection between them in case the link on both phys
3522 * is up. Note that the instead of the common vars, a temporary 3499 * is up. Note that the instead of the common vars, a temporary
3523 * vars argument is used since each phy may have different link/ 3500 * vars argument is used since each phy may have different link/
3524 * speed/duplex result 3501 * speed/duplex result
3525 */ 3502 */
3526 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 3503 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3527 phy_index++) { 3504 phy_index++) {
3528 struct bnx2x_phy *phy = &params->phy[phy_index]; 3505 struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3547,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3547 switch (bnx2x_phy_selection(params)) { 3524 switch (bnx2x_phy_selection(params)) {
3548 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 3525 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3549 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 3526 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3550 /** 3527 /*
3551 * In this option, the first PHY makes sure to pass the 3528 * In this option, the first PHY makes sure to pass the
3552 * traffic through itself only. 3529 * traffic through itself only.
3553 * Its not clear how to reset the link on the second phy 3530 * Its not clear how to reset the link on the second phy
3554 **/ 3531 */
3555 active_external_phy = EXT_PHY1; 3532 active_external_phy = EXT_PHY1;
3556 break; 3533 break;
3557 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 3534 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3558 /** 3535 /*
3559 * In this option, the first PHY makes sure to pass the 3536 * In this option, the first PHY makes sure to pass the
3560 * traffic through the second PHY. 3537 * traffic through the second PHY.
3561 **/ 3538 */
3562 active_external_phy = EXT_PHY2; 3539 active_external_phy = EXT_PHY2;
3563 break; 3540 break;
3564 default: 3541 default:
3565 /** 3542 /*
3566 * Link indication on both PHYs with the following cases 3543 * Link indication on both PHYs with the following cases
3567 * is invalid: 3544 * is invalid:
3568 * - FIRST_PHY means that second phy wasn't initialized, 3545 * - FIRST_PHY means that second phy wasn't initialized,
@@ -3570,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3570 * - SECOND_PHY means that first phy should not be able 3547 * - SECOND_PHY means that first phy should not be able
3571 * to link up by itself (using configuration) 3548 * to link up by itself (using configuration)
3572 * - DEFAULT should be overriden during initialiazation 3549 * - DEFAULT should be overriden during initialiazation
3573 **/ 3550 */
3574 DP(NETIF_MSG_LINK, "Invalid link indication" 3551 DP(NETIF_MSG_LINK, "Invalid link indication"
3575 "mpc=0x%x. DISABLING LINK !!!\n", 3552 "mpc=0x%x. DISABLING LINK !!!\n",
3576 params->multi_phy_config); 3553 params->multi_phy_config);
@@ -3580,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3580 } 3557 }
3581 } 3558 }
3582 prev_line_speed = vars->line_speed; 3559 prev_line_speed = vars->line_speed;
3583 /** 3560 /*
3584 * Step 2: 3561 * Step 2:
3585 * Read the status of the internal phy. In case of 3562 * Read the status of the internal phy. In case of
3586 * DIRECT_SINGLE_MEDIA board, this link is the external link, 3563 * DIRECT_SINGLE_MEDIA board, this link is the external link,
3587 * otherwise this is the link between the 577xx and the first 3564 * otherwise this is the link between the 577xx and the first
3588 * external phy 3565 * external phy
3589 */ 3566 */
3590 if (params->phy[INT_PHY].read_status) 3567 if (params->phy[INT_PHY].read_status)
3591 params->phy[INT_PHY].read_status( 3568 params->phy[INT_PHY].read_status(
3592 &params->phy[INT_PHY], 3569 &params->phy[INT_PHY],
3593 params, vars); 3570 params, vars);
3594 /** 3571 /*
3595 * The INT_PHY flow control reside in the vars. This include the 3572 * The INT_PHY flow control reside in the vars. This include the
3596 * case where the speed or flow control are not set to AUTO. 3573 * case where the speed or flow control are not set to AUTO.
3597 * Otherwise, the active external phy flow control result is set 3574 * Otherwise, the active external phy flow control result is set
@@ -3601,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3601 */ 3578 */
3602 if (active_external_phy > INT_PHY) { 3579 if (active_external_phy > INT_PHY) {
3603 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; 3580 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
3604 /** 3581 /*
3605 * Link speed is taken from the XGXS. AN and FC result from 3582 * Link speed is taken from the XGXS. AN and FC result from
3606 * the external phy. 3583 * the external phy.
3607 */ 3584 */
3608 vars->link_status |= phy_vars[active_external_phy].link_status; 3585 vars->link_status |= phy_vars[active_external_phy].link_status;
3609 3586
3610 /** 3587 /*
3611 * if active_external_phy is first PHY and link is up - disable 3588 * if active_external_phy is first PHY and link is up - disable
3612 * disable TX on second external PHY 3589 * disable TX on second external PHY
3613 */ 3590 */
@@ -3643,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3643 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," 3620 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
3644 " ext_phy_line_speed = %d\n", vars->flow_ctrl, 3621 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
3645 vars->link_status, ext_phy_line_speed); 3622 vars->link_status, ext_phy_line_speed);
3646 /** 3623 /*
3647 * Upon link speed change set the NIG into drain mode. Comes to 3624 * Upon link speed change set the NIG into drain mode. Comes to
3648 * deals with possible FIFO glitch due to clk change when speed 3625 * deals with possible FIFO glitch due to clk change when speed
3649 * is decreased without link down indicator 3626 * is decreased without link down indicator
@@ -3658,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3658 ext_phy_line_speed); 3635 ext_phy_line_speed);
3659 vars->phy_link_up = 0; 3636 vars->phy_link_up = 0;
3660 } else if (prev_line_speed != vars->line_speed) { 3637 } else if (prev_line_speed != vars->line_speed) {
3661 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE 3638 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
3662 + params->port*4, 0); 3639 0);
3663 msleep(1); 3640 msleep(1);
3664 } 3641 }
3665 } 3642 }
@@ -3674,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3674 3651
3675 bnx2x_link_int_ack(params, vars, link_10g); 3652 bnx2x_link_int_ack(params, vars, link_10g);
3676 3653
3677 /** 3654 /*
3678 * In case external phy link is up, and internal link is down 3655 * In case external phy link is up, and internal link is down
3679 * (not initialized yet probably after link initialization, it 3656 * (not initialized yet probably after link initialization, it
3680 * needs to be initialized. 3657 * needs to be initialized.
3681 * Note that after link down-up as result of cable plug, the xgxs 3658 * Note that after link down-up as result of cable plug, the xgxs
3682 * link would probably become up again without the need 3659 * link would probably become up again without the need
3683 * initialize it 3660 * initialize it
3684 */ 3661 */
3685 if (!(SINGLE_MEDIA_DIRECT(params))) { 3662 if (!(SINGLE_MEDIA_DIRECT(params))) {
3686 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d," 3663 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
3687 " init_preceding = %d\n", ext_phy_link_up, 3664 " init_preceding = %d\n", ext_phy_link_up,
@@ -3701,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3701 vars); 3678 vars);
3702 } 3679 }
3703 } 3680 }
3704 /** 3681 /*
3705 * Link is up only if both local phy and external phy (in case of 3682 * Link is up only if both local phy and external phy (in case of
3706 * non-direct board) are up 3683 * non-direct board) are up
3707 */ 3684 */
3708 vars->link_up = (vars->phy_link_up && 3685 vars->link_up = (vars->phy_link_up &&
3709 (ext_phy_link_up || 3686 (ext_phy_link_up ||
@@ -3724,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3724void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) 3701void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
3725{ 3702{
3726 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3703 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3727 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 3704 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3728 msleep(1); 3705 msleep(1);
3729 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3706 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3730 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 3707 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
3731} 3708}
3732 3709
3733static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, 3710static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3747,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
3747 u16 fw_ver1, fw_ver2; 3724 u16 fw_ver1, fw_ver2;
3748 3725
3749 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 3726 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
3750 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 3727 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
3751 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 3728 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
3752 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 3729 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
3753 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), 3730 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
3754 phy->ver_addr); 3731 phy->ver_addr);
3755} 3732}
@@ -3770,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
3770 if ((vars->ieee_fc & 3747 if ((vars->ieee_fc &
3771 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == 3748 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3772 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 3749 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3773 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; 3750 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3774 } 3751 }
3775 if ((vars->ieee_fc & 3752 if ((vars->ieee_fc &
3776 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == 3753 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3801,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3801 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 3778 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
3802 ret = 1; 3779 ret = 1;
3803 bnx2x_cl45_read(bp, phy, 3780 bnx2x_cl45_read(bp, phy,
3804 MDIO_AN_DEVAD, 3781 MDIO_AN_DEVAD,
3805 MDIO_AN_REG_ADV_PAUSE, &ld_pause); 3782 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
3806 bnx2x_cl45_read(bp, phy, 3783 bnx2x_cl45_read(bp, phy,
3807 MDIO_AN_DEVAD, 3784 MDIO_AN_DEVAD,
3808 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); 3785 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
3809 pause_result = (ld_pause & 3786 pause_result = (ld_pause &
3810 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; 3787 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
3811 pause_result |= (lp_pause & 3788 pause_result |= (lp_pause &
@@ -3881,31 +3858,31 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3881 /* Boot port from external ROM */ 3858 /* Boot port from external ROM */
3882 /* EDC grst */ 3859 /* EDC grst */
3883 bnx2x_cl45_write(bp, phy, 3860 bnx2x_cl45_write(bp, phy,
3884 MDIO_PMA_DEVAD, 3861 MDIO_PMA_DEVAD,
3885 MDIO_PMA_REG_GEN_CTRL, 3862 MDIO_PMA_REG_GEN_CTRL,
3886 0x0001); 3863 0x0001);
3887 3864
3888 /* ucode reboot and rst */ 3865 /* ucode reboot and rst */
3889 bnx2x_cl45_write(bp, phy, 3866 bnx2x_cl45_write(bp, phy,
3890 MDIO_PMA_DEVAD, 3867 MDIO_PMA_DEVAD,
3891 MDIO_PMA_REG_GEN_CTRL, 3868 MDIO_PMA_REG_GEN_CTRL,
3892 0x008c); 3869 0x008c);
3893 3870
3894 bnx2x_cl45_write(bp, phy, 3871 bnx2x_cl45_write(bp, phy,
3895 MDIO_PMA_DEVAD, 3872 MDIO_PMA_DEVAD,
3896 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 3873 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
3897 3874
3898 /* Reset internal microprocessor */ 3875 /* Reset internal microprocessor */
3899 bnx2x_cl45_write(bp, phy, 3876 bnx2x_cl45_write(bp, phy,
3900 MDIO_PMA_DEVAD, 3877 MDIO_PMA_DEVAD,
3901 MDIO_PMA_REG_GEN_CTRL, 3878 MDIO_PMA_REG_GEN_CTRL,
3902 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 3879 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
3903 3880
3904 /* Release srst bit */ 3881 /* Release srst bit */
3905 bnx2x_cl45_write(bp, phy, 3882 bnx2x_cl45_write(bp, phy,
3906 MDIO_PMA_DEVAD, 3883 MDIO_PMA_DEVAD,
3907 MDIO_PMA_REG_GEN_CTRL, 3884 MDIO_PMA_REG_GEN_CTRL,
3908 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 3885 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
3909 3886
3910 /* Delay 100ms per the PHY specifications */ 3887 /* Delay 100ms per the PHY specifications */
3911 msleep(100); 3888 msleep(100);
@@ -3936,8 +3913,8 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3936 3913
3937 /* Clear ser_boot_ctl bit */ 3914 /* Clear ser_boot_ctl bit */
3938 bnx2x_cl45_write(bp, phy, 3915 bnx2x_cl45_write(bp, phy,
3939 MDIO_PMA_DEVAD, 3916 MDIO_PMA_DEVAD,
3940 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 3917 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
3941 bnx2x_save_bcm_spirom_ver(bp, phy, port); 3918 bnx2x_save_bcm_spirom_ver(bp, phy, port);
3942 3919
3943 DP(NETIF_MSG_LINK, 3920 DP(NETIF_MSG_LINK,
@@ -3958,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
3958 3935
3959 /* Read 8073 HW revision*/ 3936 /* Read 8073 HW revision*/
3960 bnx2x_cl45_read(bp, phy, 3937 bnx2x_cl45_read(bp, phy,
3961 MDIO_PMA_DEVAD, 3938 MDIO_PMA_DEVAD,
3962 MDIO_PMA_REG_8073_CHIP_REV, &val); 3939 MDIO_PMA_REG_8073_CHIP_REV, &val);
3963 3940
3964 if (val != 1) { 3941 if (val != 1) {
3965 /* No need to workaround in 8073 A1 */ 3942 /* No need to workaround in 8073 A1 */
@@ -3967,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
3967 } 3944 }
3968 3945
3969 bnx2x_cl45_read(bp, phy, 3946 bnx2x_cl45_read(bp, phy,
3970 MDIO_PMA_DEVAD, 3947 MDIO_PMA_DEVAD,
3971 MDIO_PMA_REG_ROM_VER2, &val); 3948 MDIO_PMA_REG_ROM_VER2, &val);
3972 3949
3973 /* SNR should be applied only for version 0x102 */ 3950 /* SNR should be applied only for version 0x102 */
3974 if (val != 0x102) 3951 if (val != 0x102)
@@ -3982,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
3982 u16 val, cnt, cnt1 ; 3959 u16 val, cnt, cnt1 ;
3983 3960
3984 bnx2x_cl45_read(bp, phy, 3961 bnx2x_cl45_read(bp, phy,
3985 MDIO_PMA_DEVAD, 3962 MDIO_PMA_DEVAD,
3986 MDIO_PMA_REG_8073_CHIP_REV, &val); 3963 MDIO_PMA_REG_8073_CHIP_REV, &val);
3987 3964
3988 if (val > 0) { 3965 if (val > 0) {
3989 /* No need to workaround in 8073 A1 */ 3966 /* No need to workaround in 8073 A1 */
@@ -3991,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
3991 } 3968 }
3992 /* XAUI workaround in 8073 A0: */ 3969 /* XAUI workaround in 8073 A0: */
3993 3970
3994 /* After loading the boot ROM and restarting Autoneg, 3971 /*
3995 poll Dev1, Reg $C820: */ 3972 * After loading the boot ROM and restarting Autoneg, poll
3973 * Dev1, Reg $C820:
3974 */
3996 3975
3997 for (cnt = 0; cnt < 1000; cnt++) { 3976 for (cnt = 0; cnt < 1000; cnt++) {
3998 bnx2x_cl45_read(bp, phy, 3977 bnx2x_cl45_read(bp, phy,
3999 MDIO_PMA_DEVAD, 3978 MDIO_PMA_DEVAD,
4000 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 3979 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
4001 &val); 3980 &val);
4002 /* If bit [14] = 0 or bit [13] = 0, continue on with 3981 /*
4003 system initialization (XAUI work-around not required, 3982 * If bit [14] = 0 or bit [13] = 0, continue on with
4004 as these bits indicate 2.5G or 1G link up). */ 3983 * system initialization (XAUI work-around not required, as
3984 * these bits indicate 2.5G or 1G link up).
3985 */
4005 if (!(val & (1<<14)) || !(val & (1<<13))) { 3986 if (!(val & (1<<14)) || !(val & (1<<13))) {
4006 DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); 3987 DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
4007 return 0; 3988 return 0;
4008 } else if (!(val & (1<<15))) { 3989 } else if (!(val & (1<<15))) {
4009 DP(NETIF_MSG_LINK, "clc bit 15 went off\n"); 3990 DP(NETIF_MSG_LINK, "bit 15 went off\n");
4010 /* If bit 15 is 0, then poll Dev1, Reg $C841 until 3991 /*
4011 it's MSB (bit 15) goes to 1 (indicating that the 3992 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
4012 XAUI workaround has completed), 3993 * MSB (bit15) goes to 1 (indicating that the XAUI
4013 then continue on with system initialization.*/ 3994 * workaround has completed), then continue on with
3995 * system initialization.
3996 */
4014 for (cnt1 = 0; cnt1 < 1000; cnt1++) { 3997 for (cnt1 = 0; cnt1 < 1000; cnt1++) {
4015 bnx2x_cl45_read(bp, phy, 3998 bnx2x_cl45_read(bp, phy,
4016 MDIO_PMA_DEVAD, 3999 MDIO_PMA_DEVAD,
@@ -4093,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4093 gpio_port = params->port; 4076 gpio_port = params->port;
4094 /* Restore normal power mode*/ 4077 /* Restore normal power mode*/
4095 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4078 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4096 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 4079 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
4097 4080
4098 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 4081 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4099 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 4082 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
4100 4083
4101 /* enable LASI */ 4084 /* enable LASI */
4102 bnx2x_cl45_write(bp, phy, 4085 bnx2x_cl45_write(bp, phy,
@@ -4114,10 +4097,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4114 4097
4115 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 4098 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
4116 4099
4117 /**
4118 * If this is forced speed, set to KR or KX (all other are not
4119 * supported)
4120 */
4121 /* Swap polarity if required - Must be done only in non-1G mode */ 4100 /* Swap polarity if required - Must be done only in non-1G mode */
4122 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { 4101 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4123 /* Configure the 8073 to swap _P and _N of the KR lines */ 4102 /* Configure the 8073 to swap _P and _N of the KR lines */
@@ -4160,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4160 val = (1<<7); 4139 val = (1<<7);
4161 } else if (phy->req_line_speed == SPEED_2500) { 4140 } else if (phy->req_line_speed == SPEED_2500) {
4162 val = (1<<5); 4141 val = (1<<5);
4163 /* Note that 2.5G works only 4142 /*
4164 when used with 1G advertisment */ 4143 * Note that 2.5G works only when used with 1G
4144 * advertisment
4145 */
4165 } else 4146 } else
4166 val = (1<<5); 4147 val = (1<<5);
4167 } else { 4148 } else {
@@ -4170,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4170 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 4151 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
4171 val |= (1<<7); 4152 val |= (1<<7);
4172 4153
4173 /* Note that 2.5G works only when 4154 /* Note that 2.5G works only when used with 1G advertisment */
4174 used with 1G advertisment */
4175 if (phy->speed_cap_mask & 4155 if (phy->speed_cap_mask &
4176 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | 4156 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
4177 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 4157 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4211,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4211 /* Add support for CL37 (passive mode) III */ 4191 /* Add support for CL37 (passive mode) III */
4212 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 4192 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
4213 4193
4214 /* The SNR will improve about 2db by changing 4194 /*
4215 BW and FEE main tap. Rest commands are executed 4195 * The SNR will improve about 2db by changing BW and FEE main
4216 after link is up*/ 4196 * tap. Rest commands are executed after link is up
4197 * Change FFE main cursor to 5 in EDC register
4198 */
4217 if (bnx2x_8073_is_snr_needed(bp, phy)) 4199 if (bnx2x_8073_is_snr_needed(bp, phy))
4218 bnx2x_cl45_write(bp, phy, 4200 bnx2x_cl45_write(bp, phy,
4219 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, 4201 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4297,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4297 4279
4298 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); 4280 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
4299 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { 4281 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
4300 /* The SNR will improve about 2dbby 4282 /*
4301 changing the BW and FEE main tap.*/ 4283 * The SNR will improve about 2dbby changing the BW and FEE main
4302 /* The 1st write to change FFE main 4284 * tap. The 1st write to change FFE main tap is set before
4303 tap is set before restart AN */ 4285 * restart AN. Change PLL Bandwidth in EDC register
4304 /* Change PLL Bandwidth in EDC 4286 */
4305 register */
4306 bnx2x_cl45_write(bp, phy, 4287 bnx2x_cl45_write(bp, phy,
4307 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, 4288 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
4308 0x26BC); 4289 0x26BC);
@@ -4346,10 +4327,10 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4346 bnx2x_cl45_read(bp, phy, 4327 bnx2x_cl45_read(bp, phy,
4347 MDIO_XS_DEVAD, 4328 MDIO_XS_DEVAD,
4348 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); 4329 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
4349 /** 4330 /*
4350 * Set bit 3 to invert Rx in 1G mode and clear this bit 4331 * Set bit 3 to invert Rx in 1G mode and clear this bit
4351 * when it`s in 10G mode. 4332 * when it`s in 10G mode.
4352 */ 4333 */
4353 if (vars->line_speed == SPEED_1000) { 4334 if (vars->line_speed == SPEED_1000) {
4354 DP(NETIF_MSG_LINK, "Swapping 1G polarity for" 4335 DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
4355 "the 8073\n"); 4336 "the 8073\n");
@@ -4381,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
4381 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", 4362 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
4382 gpio_port); 4363 gpio_port);
4383 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4364 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4384 MISC_REGISTERS_GPIO_OUTPUT_LOW, 4365 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4385 gpio_port); 4366 gpio_port);
4386} 4367}
4387 4368
4388/******************************************************************/ 4369/******************************************************************/
@@ -4396,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
4396 DP(NETIF_MSG_LINK, "init 8705\n"); 4377 DP(NETIF_MSG_LINK, "init 8705\n");
4397 /* Restore normal power mode*/ 4378 /* Restore normal power mode*/
4398 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4379 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4399 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 4380 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
4400 /* HW reset */ 4381 /* HW reset */
4401 bnx2x_ext_phy_hw_reset(bp, params->port); 4382 bnx2x_ext_phy_hw_reset(bp, params->port);
4402 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); 4383 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
4403 bnx2x_wait_reset_complete(bp, phy); 4384 bnx2x_wait_reset_complete(bp, phy, params);
4404 4385
4405 bnx2x_cl45_write(bp, phy, 4386 bnx2x_cl45_write(bp, phy,
4406 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); 4387 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4451,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
4451/******************************************************************/ 4432/******************************************************************/
4452/* SFP+ module Section */ 4433/* SFP+ module Section */
4453/******************************************************************/ 4434/******************************************************************/
4454static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, 4435static u8 bnx2x_get_gpio_port(struct link_params *params)
4436{
4437 u8 gpio_port;
4438 u32 swap_val, swap_override;
4439 struct bnx2x *bp = params->bp;
4440 if (CHIP_IS_E2(bp))
4441 gpio_port = BP_PATH(bp);
4442 else
4443 gpio_port = params->port;
4444 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4445 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4446 return gpio_port ^ (swap_val && swap_override);
4447}
4448static void bnx2x_sfp_set_transmitter(struct link_params *params,
4455 struct bnx2x_phy *phy, 4449 struct bnx2x_phy *phy,
4456 u8 port,
4457 u8 tx_en) 4450 u8 tx_en)
4458{ 4451{
4459 u16 val; 4452 u16 val;
4453 u8 port = params->port;
4454 struct bnx2x *bp = params->bp;
4455 u32 tx_en_mode;
4460 4456
4461 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
4462 tx_en, port);
4463 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ 4457 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
4464 bnx2x_cl45_read(bp, phy, 4458 tx_en_mode = REG_RD(bp, params->shmem_base +
4465 MDIO_PMA_DEVAD, 4459 offsetof(struct shmem_region,
4466 MDIO_PMA_REG_PHY_IDENTIFIER, 4460 dev_info.port_hw_config[port].sfp_ctrl)) &
4467 &val); 4461 PORT_HW_CFG_TX_LASER_MASK;
4462 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
4463 "mode = %x\n", tx_en, port, tx_en_mode);
4464 switch (tx_en_mode) {
4465 case PORT_HW_CFG_TX_LASER_MDIO:
4468 4466
4469 if (tx_en) 4467 bnx2x_cl45_read(bp, phy,
4470 val &= ~(1<<15); 4468 MDIO_PMA_DEVAD,
4471 else 4469 MDIO_PMA_REG_PHY_IDENTIFIER,
4472 val |= (1<<15); 4470 &val);
4473 4471
4474 bnx2x_cl45_write(bp, phy, 4472 if (tx_en)
4475 MDIO_PMA_DEVAD, 4473 val &= ~(1<<15);
4476 MDIO_PMA_REG_PHY_IDENTIFIER, 4474 else
4477 val); 4475 val |= (1<<15);
4476
4477 bnx2x_cl45_write(bp, phy,
4478 MDIO_PMA_DEVAD,
4479 MDIO_PMA_REG_PHY_IDENTIFIER,
4480 val);
4481 break;
4482 case PORT_HW_CFG_TX_LASER_GPIO0:
4483 case PORT_HW_CFG_TX_LASER_GPIO1:
4484 case PORT_HW_CFG_TX_LASER_GPIO2:
4485 case PORT_HW_CFG_TX_LASER_GPIO3:
4486 {
4487 u16 gpio_pin;
4488 u8 gpio_port, gpio_mode;
4489 if (tx_en)
4490 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
4491 else
4492 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
4493
4494 gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
4495 gpio_port = bnx2x_get_gpio_port(params);
4496 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
4497 break;
4498 }
4499 default:
4500 DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
4501 break;
4502 }
4478} 4503}
4479 4504
4480static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, 4505static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4481 struct link_params *params, 4506 struct link_params *params,
4482 u16 addr, u8 byte_cnt, u8 *o_buf) 4507 u16 addr, u8 byte_cnt, u8 *o_buf)
4483{ 4508{
4484 struct bnx2x *bp = params->bp; 4509 struct bnx2x *bp = params->bp;
4485 u16 val = 0; 4510 u16 val = 0;
@@ -4492,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4492 /* Set the read command byte count */ 4517 /* Set the read command byte count */
4493 bnx2x_cl45_write(bp, phy, 4518 bnx2x_cl45_write(bp, phy,
4494 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 4519 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
4495 (byte_cnt | 0xa000)); 4520 (byte_cnt | 0xa000));
4496 4521
4497 /* Set the read command address */ 4522 /* Set the read command address */
4498 bnx2x_cl45_write(bp, phy, 4523 bnx2x_cl45_write(bp, phy,
4499 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 4524 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
4500 addr); 4525 addr);
4501 4526
4502 /* Activate read command */ 4527 /* Activate read command */
4503 bnx2x_cl45_write(bp, phy, 4528 bnx2x_cl45_write(bp, phy,
4504 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4529 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4505 0x2c0f); 4530 0x2c0f);
4506 4531
4507 /* Wait up to 500us for command complete status */ 4532 /* Wait up to 500us for command complete status */
4508 for (i = 0; i < 100; i++) { 4533 for (i = 0; i < 100; i++) {
4509 bnx2x_cl45_read(bp, phy, 4534 bnx2x_cl45_read(bp, phy,
4510 MDIO_PMA_DEVAD, 4535 MDIO_PMA_DEVAD,
4511 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4536 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4512 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4537 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4513 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 4538 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
4514 break; 4539 break;
@@ -4526,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4526 /* Read the buffer */ 4551 /* Read the buffer */
4527 for (i = 0; i < byte_cnt; i++) { 4552 for (i = 0; i < byte_cnt; i++) {
4528 bnx2x_cl45_read(bp, phy, 4553 bnx2x_cl45_read(bp, phy,
4529 MDIO_PMA_DEVAD, 4554 MDIO_PMA_DEVAD,
4530 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); 4555 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
4531 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); 4556 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
4532 } 4557 }
4533 4558
4534 for (i = 0; i < 100; i++) { 4559 for (i = 0; i < 100; i++) {
4535 bnx2x_cl45_read(bp, phy, 4560 bnx2x_cl45_read(bp, phy,
4536 MDIO_PMA_DEVAD, 4561 MDIO_PMA_DEVAD,
4537 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4562 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4538 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4563 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4539 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4564 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
4540 return 0; 4565 return 0;
@@ -4545,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4545 4570
4546static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, 4571static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4547 struct link_params *params, 4572 struct link_params *params,
4548 u16 addr, u8 byte_cnt, u8 *o_buf) 4573 u16 addr, u8 byte_cnt, u8 *o_buf)
4549{ 4574{
4550 struct bnx2x *bp = params->bp; 4575 struct bnx2x *bp = params->bp;
4551 u16 val, i; 4576 u16 val, i;
@@ -4558,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4558 4583
4559 /* Need to read from 1.8000 to clear it */ 4584 /* Need to read from 1.8000 to clear it */
4560 bnx2x_cl45_read(bp, phy, 4585 bnx2x_cl45_read(bp, phy,
4561 MDIO_PMA_DEVAD, 4586 MDIO_PMA_DEVAD,
4562 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4587 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4563 &val); 4588 &val);
4564 4589
4565 /* Set the read command byte count */ 4590 /* Set the read command byte count */
4566 bnx2x_cl45_write(bp, phy, 4591 bnx2x_cl45_write(bp, phy,
4567 MDIO_PMA_DEVAD, 4592 MDIO_PMA_DEVAD,
4568 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 4593 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
4569 ((byte_cnt < 2) ? 2 : byte_cnt)); 4594 ((byte_cnt < 2) ? 2 : byte_cnt));
4570 4595
4571 /* Set the read command address */ 4596 /* Set the read command address */
4572 bnx2x_cl45_write(bp, phy, 4597 bnx2x_cl45_write(bp, phy,
4573 MDIO_PMA_DEVAD, 4598 MDIO_PMA_DEVAD,
4574 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 4599 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
4575 addr); 4600 addr);
4576 /* Set the destination address */ 4601 /* Set the destination address */
4577 bnx2x_cl45_write(bp, phy, 4602 bnx2x_cl45_write(bp, phy,
4578 MDIO_PMA_DEVAD, 4603 MDIO_PMA_DEVAD,
4579 0x8004, 4604 0x8004,
4580 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); 4605 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
4581 4606
4582 /* Activate read command */ 4607 /* Activate read command */
4583 bnx2x_cl45_write(bp, phy, 4608 bnx2x_cl45_write(bp, phy,
4584 MDIO_PMA_DEVAD, 4609 MDIO_PMA_DEVAD,
4585 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4610 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4586 0x8002); 4611 0x8002);
4587 /* Wait appropriate time for two-wire command to finish before 4612 /*
4588 polling the status register */ 4613 * Wait appropriate time for two-wire command to finish before
4614 * polling the status register
4615 */
4589 msleep(1); 4616 msleep(1);
4590 4617
4591 /* Wait up to 500us for command complete status */ 4618 /* Wait up to 500us for command complete status */
4592 for (i = 0; i < 100; i++) { 4619 for (i = 0; i < 100; i++) {
4593 bnx2x_cl45_read(bp, phy, 4620 bnx2x_cl45_read(bp, phy,
4594 MDIO_PMA_DEVAD, 4621 MDIO_PMA_DEVAD,
4595 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4622 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4596 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4623 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4597 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 4624 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
4598 break; 4625 break;
@@ -4604,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4604 DP(NETIF_MSG_LINK, 4631 DP(NETIF_MSG_LINK,
4605 "Got bad status 0x%x when reading from SFP+ EEPROM\n", 4632 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
4606 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); 4633 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
4607 return -EINVAL; 4634 return -EFAULT;
4608 } 4635 }
4609 4636
4610 /* Read the buffer */ 4637 /* Read the buffer */
4611 for (i = 0; i < byte_cnt; i++) { 4638 for (i = 0; i < byte_cnt; i++) {
4612 bnx2x_cl45_read(bp, phy, 4639 bnx2x_cl45_read(bp, phy,
4613 MDIO_PMA_DEVAD, 4640 MDIO_PMA_DEVAD,
4614 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); 4641 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
4615 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); 4642 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
4616 } 4643 }
4617 4644
4618 for (i = 0; i < 100; i++) { 4645 for (i = 0; i < 100; i++) {
4619 bnx2x_cl45_read(bp, phy, 4646 bnx2x_cl45_read(bp, phy,
4620 MDIO_PMA_DEVAD, 4647 MDIO_PMA_DEVAD,
4621 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4648 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4622 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4649 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4623 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4650 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
4624 return 0; 4651 return 0;
@@ -4628,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4628 return -EINVAL; 4655 return -EINVAL;
4629} 4656}
4630 4657
4631static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, 4658u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4632 struct link_params *params, u16 addr, 4659 struct link_params *params, u16 addr,
4633 u8 byte_cnt, u8 *o_buf) 4660 u8 byte_cnt, u8 *o_buf)
4634{ 4661{
4635 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 4662 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
4636 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, 4663 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
4637 byte_cnt, o_buf); 4664 byte_cnt, o_buf);
4638 else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 4665 else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
4639 return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, 4666 return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
4640 byte_cnt, o_buf); 4667 byte_cnt, o_buf);
4641 return -EINVAL; 4668 return -EINVAL;
4642} 4669}
4643 4670
4644static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, 4671static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4645 struct link_params *params, 4672 struct link_params *params,
4646 u16 *edc_mode) 4673 u16 *edc_mode)
4647{ 4674{
4648 struct bnx2x *bp = params->bp; 4675 struct bnx2x *bp = params->bp;
4649 u8 val, check_limiting_mode = 0; 4676 u8 val, check_limiting_mode = 0;
@@ -4664,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4664 { 4691 {
4665 u8 copper_module_type; 4692 u8 copper_module_type;
4666 4693
4667 /* Check if its active cable( includes SFP+ module) 4694 /*
4668 of passive cable*/ 4695 * Check if its active cable (includes SFP+ module)
4696 * of passive cable
4697 */
4669 if (bnx2x_read_sfp_module_eeprom(phy, 4698 if (bnx2x_read_sfp_module_eeprom(phy,
4670 params, 4699 params,
4671 SFP_EEPROM_FC_TX_TECH_ADDR, 4700 SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4724,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4724 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 4753 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
4725 return 0; 4754 return 0;
4726} 4755}
4727/* This function read the relevant field from the module ( SFP+ ), 4756/*
4728 and verify it is compliant with this board */ 4757 * This function read the relevant field from the module (SFP+), and verify it
4758 * is compliant with this board
4759 */
4729static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, 4760static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4730 struct link_params *params) 4761 struct link_params *params)
4731{ 4762{
@@ -4774,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4774 /* format the warning message */ 4805 /* format the warning message */
4775 if (bnx2x_read_sfp_module_eeprom(phy, 4806 if (bnx2x_read_sfp_module_eeprom(phy,
4776 params, 4807 params,
4777 SFP_EEPROM_VENDOR_NAME_ADDR, 4808 SFP_EEPROM_VENDOR_NAME_ADDR,
4778 SFP_EEPROM_VENDOR_NAME_SIZE, 4809 SFP_EEPROM_VENDOR_NAME_SIZE,
4779 (u8 *)vendor_name)) 4810 (u8 *)vendor_name))
4780 vendor_name[0] = '\0'; 4811 vendor_name[0] = '\0';
4781 else 4812 else
4782 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; 4813 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
4783 if (bnx2x_read_sfp_module_eeprom(phy, 4814 if (bnx2x_read_sfp_module_eeprom(phy,
4784 params, 4815 params,
4785 SFP_EEPROM_PART_NO_ADDR, 4816 SFP_EEPROM_PART_NO_ADDR,
4786 SFP_EEPROM_PART_NO_SIZE, 4817 SFP_EEPROM_PART_NO_SIZE,
4787 (u8 *)vendor_pn)) 4818 (u8 *)vendor_pn))
4788 vendor_pn[0] = '\0'; 4819 vendor_pn[0] = '\0';
4789 else 4820 else
4790 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; 4821 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
4791 4822
4792 netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected," 4823 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
4793 " Port %d from %s part number %s\n", 4824 " Port %d from %s part number %s\n",
4794 params->port, vendor_name, vendor_pn); 4825 params->port, vendor_name, vendor_pn);
4795 phy->flags |= FLAGS_SFP_NOT_APPROVED; 4826 phy->flags |= FLAGS_SFP_NOT_APPROVED;
4796 return -EINVAL; 4827 return -EINVAL;
4797} 4828}
@@ -4803,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
4803 u8 val; 4834 u8 val;
4804 struct bnx2x *bp = params->bp; 4835 struct bnx2x *bp = params->bp;
4805 u16 timeout; 4836 u16 timeout;
4806 /* Initialization time after hot-plug may take up to 300ms for some 4837 /*
4807 phys type ( e.g. JDSU ) */ 4838 * Initialization time after hot-plug may take up to 300ms for
4839 * some phys type ( e.g. JDSU )
4840 */
4841
4808 for (timeout = 0; timeout < 60; timeout++) { 4842 for (timeout = 0; timeout < 60; timeout++) {
4809 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) 4843 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
4810 == 0) { 4844 == 0) {
@@ -4823,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
4823 /* Make sure GPIOs are not using for LED mode */ 4857 /* Make sure GPIOs are not using for LED mode */
4824 u16 val; 4858 u16 val;
4825 /* 4859 /*
4826 * In the GPIO register, bit 4 is use to detemine if the GPIOs are 4860 * In the GPIO register, bit 4 is use to determine if the GPIOs are
4827 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for 4861 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
4828 * output 4862 * output
4829 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 4863 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
4830 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 4864 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
4831 * where the 1st bit is the over-current(only input), and 2nd bit is 4865 * where the 1st bit is the over-current(only input), and 2nd bit is
4832 * for power( only output ) 4866 * for power( only output )
4833 */ 4867 *
4834
4835 /*
4836 * In case of NOC feature is disabled and power is up, set GPIO control 4868 * In case of NOC feature is disabled and power is up, set GPIO control
4837 * as input to enable listening of over-current indication 4869 * as input to enable listening of over-current indication
4838 */ 4870 */
@@ -4861,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
4861 u16 cur_limiting_mode; 4893 u16 cur_limiting_mode;
4862 4894
4863 bnx2x_cl45_read(bp, phy, 4895 bnx2x_cl45_read(bp, phy,
4864 MDIO_PMA_DEVAD, 4896 MDIO_PMA_DEVAD,
4865 MDIO_PMA_REG_ROM_VER2, 4897 MDIO_PMA_REG_ROM_VER2,
4866 &cur_limiting_mode); 4898 &cur_limiting_mode);
4867 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n", 4899 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
4868 cur_limiting_mode); 4900 cur_limiting_mode);
4869 4901
4870 if (edc_mode == EDC_MODE_LIMITING) { 4902 if (edc_mode == EDC_MODE_LIMITING) {
4871 DP(NETIF_MSG_LINK, 4903 DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
4872 "Setting LIMITING MODE\n");
4873 bnx2x_cl45_write(bp, phy, 4904 bnx2x_cl45_write(bp, phy,
4874 MDIO_PMA_DEVAD, 4905 MDIO_PMA_DEVAD,
4875 MDIO_PMA_REG_ROM_VER2, 4906 MDIO_PMA_REG_ROM_VER2,
@@ -4878,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
4878 4909
4879 DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); 4910 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
4880 4911
4881 /* Changing to LRM mode takes quite few seconds. 4912 /*
4882 So do it only if current mode is limiting 4913 * Changing to LRM mode takes quite few seconds. So do it only
4883 ( default is LRM )*/ 4914 * if current mode is limiting (default is LRM)
4915 */
4884 if (cur_limiting_mode != EDC_MODE_LIMITING) 4916 if (cur_limiting_mode != EDC_MODE_LIMITING)
4885 return 0; 4917 return 0;
4886 4918
4887 bnx2x_cl45_write(bp, phy, 4919 bnx2x_cl45_write(bp, phy,
4888 MDIO_PMA_DEVAD, 4920 MDIO_PMA_DEVAD,
4889 MDIO_PMA_REG_LRM_MODE, 4921 MDIO_PMA_REG_LRM_MODE,
4890 0); 4922 0);
4891 bnx2x_cl45_write(bp, phy, 4923 bnx2x_cl45_write(bp, phy,
4892 MDIO_PMA_DEVAD, 4924 MDIO_PMA_DEVAD,
4893 MDIO_PMA_REG_ROM_VER2, 4925 MDIO_PMA_REG_ROM_VER2,
4894 0x128); 4926 0x128);
4895 bnx2x_cl45_write(bp, phy, 4927 bnx2x_cl45_write(bp, phy,
4896 MDIO_PMA_DEVAD, 4928 MDIO_PMA_DEVAD,
4897 MDIO_PMA_REG_MISC_CTRL0, 4929 MDIO_PMA_REG_MISC_CTRL0,
4898 0x4008); 4930 0x4008);
4899 bnx2x_cl45_write(bp, phy, 4931 bnx2x_cl45_write(bp, phy,
4900 MDIO_PMA_DEVAD, 4932 MDIO_PMA_DEVAD,
4901 MDIO_PMA_REG_LRM_MODE, 4933 MDIO_PMA_REG_LRM_MODE,
4902 0xaaaa); 4934 0xaaaa);
4903 } 4935 }
4904 return 0; 4936 return 0;
4905} 4937}
4906 4938
4907static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp, 4939static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
4908 struct bnx2x_phy *phy, 4940 struct bnx2x_phy *phy,
4909 u16 edc_mode) 4941 u16 edc_mode)
4910{ 4942{
4911 u16 phy_identifier; 4943 u16 phy_identifier;
4912 u16 rom_ver2_val; 4944 u16 rom_ver2_val;
4913 bnx2x_cl45_read(bp, phy, 4945 bnx2x_cl45_read(bp, phy,
4914 MDIO_PMA_DEVAD, 4946 MDIO_PMA_DEVAD,
4915 MDIO_PMA_REG_PHY_IDENTIFIER, 4947 MDIO_PMA_REG_PHY_IDENTIFIER,
4916 &phy_identifier); 4948 &phy_identifier);
4917 4949
4918 bnx2x_cl45_write(bp, phy, 4950 bnx2x_cl45_write(bp, phy,
4919 MDIO_PMA_DEVAD, 4951 MDIO_PMA_DEVAD,
4920 MDIO_PMA_REG_PHY_IDENTIFIER, 4952 MDIO_PMA_REG_PHY_IDENTIFIER,
4921 (phy_identifier & ~(1<<9))); 4953 (phy_identifier & ~(1<<9)));
4922 4954
4923 bnx2x_cl45_read(bp, phy, 4955 bnx2x_cl45_read(bp, phy,
4924 MDIO_PMA_DEVAD, 4956 MDIO_PMA_DEVAD,
4925 MDIO_PMA_REG_ROM_VER2, 4957 MDIO_PMA_REG_ROM_VER2,
4926 &rom_ver2_val); 4958 &rom_ver2_val);
4927 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ 4959 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
4928 bnx2x_cl45_write(bp, phy, 4960 bnx2x_cl45_write(bp, phy,
4929 MDIO_PMA_DEVAD, 4961 MDIO_PMA_DEVAD,
4930 MDIO_PMA_REG_ROM_VER2, 4962 MDIO_PMA_REG_ROM_VER2,
4931 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); 4963 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
4932 4964
4933 bnx2x_cl45_write(bp, phy, 4965 bnx2x_cl45_write(bp, phy,
4934 MDIO_PMA_DEVAD, 4966 MDIO_PMA_DEVAD,
4935 MDIO_PMA_REG_PHY_IDENTIFIER, 4967 MDIO_PMA_REG_PHY_IDENTIFIER,
4936 (phy_identifier | (1<<9))); 4968 (phy_identifier | (1<<9)));
4937 4969
4938 return 0; 4970 return 0;
4939} 4971}
@@ -4946,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
4946 4978
4947 switch (action) { 4979 switch (action) {
4948 case DISABLE_TX: 4980 case DISABLE_TX:
4949 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); 4981 bnx2x_sfp_set_transmitter(params, phy, 0);
4950 break; 4982 break;
4951 case ENABLE_TX: 4983 case ENABLE_TX:
4952 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) 4984 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
4953 bnx2x_sfp_set_transmitter(bp, phy, params->port, 1); 4985 bnx2x_sfp_set_transmitter(params, phy, 1);
4954 break; 4986 break;
4955 default: 4987 default:
4956 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", 4988 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4959,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
4959 } 4991 }
4960} 4992}
4961 4993
4994static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
4995 u8 gpio_mode)
4996{
4997 struct bnx2x *bp = params->bp;
4998
4999 u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
5000 offsetof(struct shmem_region,
5001 dev_info.port_hw_config[params->port].sfp_ctrl)) &
5002 PORT_HW_CFG_FAULT_MODULE_LED_MASK;
5003 switch (fault_led_gpio) {
5004 case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
5005 return;
5006 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
5007 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
5008 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
5009 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
5010 {
5011 u8 gpio_port = bnx2x_get_gpio_port(params);
5012 u16 gpio_pin = fault_led_gpio -
5013 PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
5014 DP(NETIF_MSG_LINK, "Set fault module-detected led "
5015 "pin %x port %x mode %x\n",
5016 gpio_pin, gpio_port, gpio_mode);
5017 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
5018 }
5019 break;
5020 default:
5021 DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
5022 fault_led_gpio);
5023 }
5024}
5025
4962static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, 5026static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4963 struct link_params *params) 5027 struct link_params *params)
4964{ 5028{
@@ -4976,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4976 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { 5040 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
4977 DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); 5041 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
4978 return -EINVAL; 5042 return -EINVAL;
4979 } else if (bnx2x_verify_sfp_module(phy, params) != 5043 } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
4980 0) {
4981 /* check SFP+ module compatibility */ 5044 /* check SFP+ module compatibility */
4982 DP(NETIF_MSG_LINK, "Module verification failed!!\n"); 5045 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
4983 rc = -EINVAL; 5046 rc = -EINVAL;
4984 /* Turn on fault module-detected led */ 5047 /* Turn on fault module-detected led */
4985 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 5048 bnx2x_set_sfp_module_fault_led(params,
4986 MISC_REGISTERS_GPIO_HIGH, 5049 MISC_REGISTERS_GPIO_HIGH);
4987 params->port); 5050
4988 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && 5051 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
4989 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5052 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
4990 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { 5053 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4995,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4995 } 5058 }
4996 } else { 5059 } else {
4997 /* Turn off fault module-detected led */ 5060 /* Turn off fault module-detected led */
4998 DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n"); 5061 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
4999 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
5000 MISC_REGISTERS_GPIO_LOW,
5001 params->port);
5002 } 5062 }
5003 5063
5004 /* power up the SFP module */ 5064 /* power up the SFP module */
5005 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 5065 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
5006 bnx2x_8727_power_module(bp, phy, 1); 5066 bnx2x_8727_power_module(bp, phy, 1);
5007 5067
5008 /* Check and set limiting mode / LRM mode on 8726. 5068 /*
5009 On 8727 it is done automatically */ 5069 * Check and set limiting mode / LRM mode on 8726. On 8727 it
5070 * is done automatically
5071 */
5010 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 5072 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
5011 bnx2x_8726_set_limiting_mode(bp, phy, edc_mode); 5073 bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
5012 else 5074 else
@@ -5018,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
5018 if (rc == 0 || 5080 if (rc == 0 ||
5019 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != 5081 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
5020 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 5082 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
5021 bnx2x_sfp_set_transmitter(bp, phy, params->port, 1); 5083 bnx2x_sfp_set_transmitter(params, phy, 1);
5022 else 5084 else
5023 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); 5085 bnx2x_sfp_set_transmitter(params, phy, 0);
5024 5086
5025 return rc; 5087 return rc;
5026} 5088}
@@ -5033,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
5033 u8 port = params->port; 5095 u8 port = params->port;
5034 5096
5035 /* Set valid module led off */ 5097 /* Set valid module led off */
5036 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 5098 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
5037 MISC_REGISTERS_GPIO_HIGH,
5038 params->port);
5039 5099
5040 /* Get current gpio val refelecting module plugged in / out*/ 5100 /* Get current gpio val reflecting module plugged in / out*/
5041 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port); 5101 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
5042 5102
5043 /* Call the handling function in case module is detected */ 5103 /* Call the handling function in case module is detected */
@@ -5053,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
5053 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 5113 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
5054 } else { 5114 } else {
5055 u32 val = REG_RD(bp, params->shmem_base + 5115 u32 val = REG_RD(bp, params->shmem_base +
5056 offsetof(struct shmem_region, dev_info. 5116 offsetof(struct shmem_region, dev_info.
5057 port_feature_config[params->port]. 5117 port_feature_config[params->port].
5058 config)); 5118 config));
5059 5119
5060 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, 5120 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
5061 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 5121 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
5062 port); 5122 port);
5063 /* Module was plugged out. */ 5123 /*
5064 /* Disable transmit for this module */ 5124 * Module was plugged out.
5125 * Disable transmit for this module
5126 */
5065 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5127 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
5066 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 5128 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
5067 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); 5129 bnx2x_sfp_set_transmitter(params, phy, 0);
5068 } 5130 }
5069} 5131}
5070 5132
@@ -5100,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
5100 5162
5101 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" 5163 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
5102 " link_status 0x%x\n", rx_sd, pcs_status, val2); 5164 " link_status 0x%x\n", rx_sd, pcs_status, val2);
5103 /* link is up if both bit 0 of pmd_rx_sd and 5165 /*
5104 * bit 0 of pcs_status are set, or if the autoneg bit 5166 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
5105 * 1 is set 5167 * are set, or if the autoneg bit 1 is set
5106 */ 5168 */
5107 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); 5169 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
5108 if (link_up) { 5170 if (link_up) {
@@ -5123,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
5123 struct link_params *params, 5185 struct link_params *params,
5124 struct link_vars *vars) 5186 struct link_vars *vars)
5125{ 5187{
5126 u16 cnt, val; 5188 u32 tx_en_mode;
5189 u16 cnt, val, tmp1;
5127 struct bnx2x *bp = params->bp; 5190 struct bnx2x *bp = params->bp;
5128 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 5191 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
5129 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 5192 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5130 /* HW reset */ 5193 /* HW reset */
5131 bnx2x_ext_phy_hw_reset(bp, params->port); 5194 bnx2x_ext_phy_hw_reset(bp, params->port);
5132 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); 5195 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
5133 bnx2x_wait_reset_complete(bp, phy); 5196 bnx2x_wait_reset_complete(bp, phy, params);
5134 5197
5135 /* Wait until fw is loaded */ 5198 /* Wait until fw is loaded */
5136 for (cnt = 0; cnt < 100; cnt++) { 5199 for (cnt = 0; cnt < 100; cnt++) {
@@ -5197,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
5197 0x0004); 5260 0x0004);
5198 } 5261 }
5199 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 5262 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
5263
5264 /*
5265 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
5266 * power mode, if TX Laser is disabled
5267 */
5268
5269 tx_en_mode = REG_RD(bp, params->shmem_base +
5270 offsetof(struct shmem_region,
5271 dev_info.port_hw_config[params->port].sfp_ctrl))
5272 & PORT_HW_CFG_TX_LASER_MASK;
5273
5274 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
5275 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
5276 bnx2x_cl45_read(bp, phy,
5277 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
5278 tmp1 |= 0x1;
5279 bnx2x_cl45_write(bp, phy,
5280 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
5281 }
5282
5200 return 0; 5283 return 0;
5201} 5284}
5202 5285
@@ -5231,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
5231 5314
5232 /* Set soft reset */ 5315 /* Set soft reset */
5233 bnx2x_cl45_write(bp, phy, 5316 bnx2x_cl45_write(bp, phy,
5234 MDIO_PMA_DEVAD, 5317 MDIO_PMA_DEVAD,
5235 MDIO_PMA_REG_GEN_CTRL, 5318 MDIO_PMA_REG_GEN_CTRL,
5236 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 5319 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
5237 5320
5238 bnx2x_cl45_write(bp, phy, 5321 bnx2x_cl45_write(bp, phy,
5239 MDIO_PMA_DEVAD, 5322 MDIO_PMA_DEVAD,
5240 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 5323 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
5241 5324
5242 bnx2x_cl45_write(bp, phy, 5325 bnx2x_cl45_write(bp, phy,
5243 MDIO_PMA_DEVAD, 5326 MDIO_PMA_DEVAD,
5244 MDIO_PMA_REG_GEN_CTRL, 5327 MDIO_PMA_REG_GEN_CTRL,
5245 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 5328 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
5246 5329
5247 /* wait for 150ms for microcode load */ 5330 /* wait for 150ms for microcode load */
5248 msleep(150); 5331 msleep(150);
5249 5332
5250 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ 5333 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
5251 bnx2x_cl45_write(bp, phy, 5334 bnx2x_cl45_write(bp, phy,
5252 MDIO_PMA_DEVAD, 5335 MDIO_PMA_DEVAD,
5253 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 5336 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
5254 5337
5255 msleep(200); 5338 msleep(200);
5256 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 5339 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5285,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
5285 u32 val; 5368 u32 val;
5286 u32 swap_val, swap_override, aeu_gpio_mask, offset; 5369 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5287 DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); 5370 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
5288 /* Restore normal power mode*/
5289 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
5290 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5291
5292 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
5293 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5294 5371
5295 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 5372 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
5296 bnx2x_wait_reset_complete(bp, phy); 5373 bnx2x_wait_reset_complete(bp, phy, params);
5297 5374
5298 bnx2x_8726_external_rom_boot(phy, params); 5375 bnx2x_8726_external_rom_boot(phy, params);
5299 5376
5300 /* Need to call module detected on initialization since 5377 /*
5301 the module detection triggered by actual module 5378 * Need to call module detected on initialization since the module
5302 insertion might occur before driver is loaded, and when 5379 * detection triggered by actual module insertion might occur before
5303 driver is loaded, it reset all registers, including the 5380 * driver is loaded, and when driver is loaded, it reset all
5304 transmitter */ 5381 * registers, including the transmitter
5382 */
5305 bnx2x_sfp_module_detection(phy, params); 5383 bnx2x_sfp_module_detection(phy, params);
5306 5384
5307 if (phy->req_line_speed == SPEED_1000) { 5385 if (phy->req_line_speed == SPEED_1000) {
@@ -5334,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
5334 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 5412 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
5335 bnx2x_cl45_write(bp, phy, 5413 bnx2x_cl45_write(bp, phy,
5336 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); 5414 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
5337 /* Enable RX-ALARM control to receive 5415 /*
5338 interrupt for 1G speed change */ 5416 * Enable RX-ALARM control to receive interrupt for 1G speed
5417 * change
5418 */
5339 bnx2x_cl45_write(bp, phy, 5419 bnx2x_cl45_write(bp, phy,
5340 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4); 5420 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
5341 bnx2x_cl45_write(bp, phy, 5421 bnx2x_cl45_write(bp, phy,
@@ -5367,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
5367 5447
5368 /* Set GPIO3 to trigger SFP+ module insertion/removal */ 5448 /* Set GPIO3 to trigger SFP+ module insertion/removal */
5369 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 5449 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5370 MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port); 5450 MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
5371 5451
5372 /* The GPIO should be swapped if the swap register is set and active */ 5452 /* The GPIO should be swapped if the swap register is set and active */
5373 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 5453 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5458,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
5458 struct link_params *params) { 5538 struct link_params *params) {
5459 u32 swap_val, swap_override; 5539 u32 swap_val, swap_override;
5460 u8 port; 5540 u8 port;
5461 /** 5541 /*
5462 * The PHY reset is controlled by GPIO 1. Fake the port number 5542 * The PHY reset is controlled by GPIO 1. Fake the port number
5463 * to cancel the swap done in set_gpio() 5543 * to cancel the swap done in set_gpio()
5464 */ 5544 */
@@ -5467,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
5467 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 5547 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5468 port = (swap_val && swap_override) ^ 1; 5548 port = (swap_val && swap_override) ^ 1;
5469 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 5549 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
5470 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 5550 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
5471} 5551}
5472 5552
5473static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, 5553static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5474 struct link_params *params, 5554 struct link_params *params,
5475 struct link_vars *vars) 5555 struct link_vars *vars)
5476{ 5556{
5477 u16 tmp1, val, mod_abs; 5557 u32 tx_en_mode;
5558 u16 tmp1, val, mod_abs, tmp2;
5478 u16 rx_alarm_ctrl_val; 5559 u16 rx_alarm_ctrl_val;
5479 u16 lasi_ctrl_val; 5560 u16 lasi_ctrl_val;
5480 struct bnx2x *bp = params->bp; 5561 struct bnx2x *bp = params->bp;
5481 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ 5562 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
5482 5563
5483 bnx2x_wait_reset_complete(bp, phy); 5564 bnx2x_wait_reset_complete(bp, phy, params);
5484 rx_alarm_ctrl_val = (1<<2) | (1<<5) ; 5565 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
5485 lasi_ctrl_val = 0x0004; 5566 lasi_ctrl_val = 0x0004;
5486 5567
@@ -5493,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5493 bnx2x_cl45_write(bp, phy, 5574 bnx2x_cl45_write(bp, phy,
5494 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val); 5575 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
5495 5576
5496 /* Initially configure MOD_ABS to interrupt when 5577 /*
5497 module is presence( bit 8) */ 5578 * Initially configure MOD_ABS to interrupt when module is
5579 * presence( bit 8)
5580 */
5498 bnx2x_cl45_read(bp, phy, 5581 bnx2x_cl45_read(bp, phy,
5499 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); 5582 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
5500 /* Set EDC off by setting OPTXLOS signal input to low 5583 /*
5501 (bit 9). 5584 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
5502 When the EDC is off it locks onto a reference clock and 5585 * When the EDC is off it locks onto a reference clock and avoids
5503 avoids becoming 'lost'.*/ 5586 * becoming 'lost'
5587 */
5504 mod_abs &= ~(1<<8); 5588 mod_abs &= ~(1<<8);
5505 if (!(phy->flags & FLAGS_NOC)) 5589 if (!(phy->flags & FLAGS_NOC))
5506 mod_abs &= ~(1<<9); 5590 mod_abs &= ~(1<<9);
@@ -5515,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5515 if (phy->flags & FLAGS_NOC) 5599 if (phy->flags & FLAGS_NOC)
5516 val |= (3<<5); 5600 val |= (3<<5);
5517 5601
5518 /** 5602 /*
5519 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 5603 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
5520 * status which reflect SFP+ module over-current 5604 * status which reflect SFP+ module over-current
5521 */ 5605 */
@@ -5542,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5542 bnx2x_cl45_read(bp, phy, 5626 bnx2x_cl45_read(bp, phy,
5543 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); 5627 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
5544 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); 5628 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
5545 /** 5629 /*
5546 * Power down the XAUI until link is up in case of dual-media 5630 * Power down the XAUI until link is up in case of dual-media
5547 * and 1G 5631 * and 1G
5548 */ 5632 */
@@ -5568,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5568 bnx2x_cl45_write(bp, phy, 5652 bnx2x_cl45_write(bp, phy,
5569 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); 5653 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
5570 } else { 5654 } else {
5571 /** 5655 /*
5572 * Since the 8727 has only single reset pin, need to set the 10G 5656 * Since the 8727 has only single reset pin, need to set the 10G
5573 * registers although it is default 5657 * registers although it is default
5574 */ 5658 */
@@ -5584,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5584 0x0008); 5668 0x0008);
5585 } 5669 }
5586 5670
5587 /* Set 2-wire transfer rate of SFP+ module EEPROM 5671 /*
5672 * Set 2-wire transfer rate of SFP+ module EEPROM
5588 * to 100Khz since some DACs(direct attached cables) do 5673 * to 100Khz since some DACs(direct attached cables) do
5589 * not work at 400Khz. 5674 * not work at 400Khz.
5590 */ 5675 */
@@ -5607,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5607 phy->tx_preemphasis[1]); 5692 phy->tx_preemphasis[1]);
5608 } 5693 }
5609 5694
5695 /*
5696 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
5697 * power mode, if TX Laser is disabled
5698 */
5699 tx_en_mode = REG_RD(bp, params->shmem_base +
5700 offsetof(struct shmem_region,
5701 dev_info.port_hw_config[params->port].sfp_ctrl))
5702 & PORT_HW_CFG_TX_LASER_MASK;
5703
5704 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
5705
5706 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
5707 bnx2x_cl45_read(bp, phy,
5708 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
5709 tmp2 |= 0x1000;
5710 tmp2 &= 0xFFEF;
5711 bnx2x_cl45_write(bp, phy,
5712 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
5713 }
5714
5610 return 0; 5715 return 0;
5611} 5716}
5612 5717
@@ -5620,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5620 port_feature_config[params->port]. 5725 port_feature_config[params->port].
5621 config)); 5726 config));
5622 bnx2x_cl45_read(bp, phy, 5727 bnx2x_cl45_read(bp, phy,
5623 MDIO_PMA_DEVAD, 5728 MDIO_PMA_DEVAD,
5624 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); 5729 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
5625 if (mod_abs & (1<<8)) { 5730 if (mod_abs & (1<<8)) {
5626 5731
5627 /* Module is absent */ 5732 /* Module is absent */
5628 DP(NETIF_MSG_LINK, "MOD_ABS indication " 5733 DP(NETIF_MSG_LINK, "MOD_ABS indication "
5629 "show module is absent\n"); 5734 "show module is absent\n");
5630 5735
5631 /* 1. Set mod_abs to detect next module 5736 /*
5632 presence event 5737 * 1. Set mod_abs to detect next module
5633 2. Set EDC off by setting OPTXLOS signal input to low 5738 * presence event
5634 (bit 9). 5739 * 2. Set EDC off by setting OPTXLOS signal input to low
5635 When the EDC is off it locks onto a reference clock and 5740 * (bit 9).
5636 avoids becoming 'lost'.*/ 5741 * When the EDC is off it locks onto a reference clock and
5742 * avoids becoming 'lost'.
5743 */
5637 mod_abs &= ~(1<<8); 5744 mod_abs &= ~(1<<8);
5638 if (!(phy->flags & FLAGS_NOC)) 5745 if (!(phy->flags & FLAGS_NOC))
5639 mod_abs &= ~(1<<9); 5746 mod_abs &= ~(1<<9);
5640 bnx2x_cl45_write(bp, phy, 5747 bnx2x_cl45_write(bp, phy,
5641 MDIO_PMA_DEVAD, 5748 MDIO_PMA_DEVAD,
5642 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 5749 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
5643 5750
5644 /* Clear RX alarm since it stays up as long as 5751 /*
5645 the mod_abs wasn't changed */ 5752 * Clear RX alarm since it stays up as long as
5753 * the mod_abs wasn't changed
5754 */
5646 bnx2x_cl45_read(bp, phy, 5755 bnx2x_cl45_read(bp, phy,
5647 MDIO_PMA_DEVAD, 5756 MDIO_PMA_DEVAD,
5648 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); 5757 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
5649 5758
5650 } else { 5759 } else {
5651 /* Module is present */ 5760 /* Module is present */
5652 DP(NETIF_MSG_LINK, "MOD_ABS indication " 5761 DP(NETIF_MSG_LINK, "MOD_ABS indication "
5653 "show module is present\n"); 5762 "show module is present\n");
5654 /* First thing, disable transmitter, 5763 /*
5655 and if the module is ok, the 5764 * First disable transmitter, and if the module is ok, the
5656 module_detection will enable it*/ 5765 * module_detection will enable it
5657 5766 * 1. Set mod_abs to detect next module absent event ( bit 8)
5658 /* 1. Set mod_abs to detect next module 5767 * 2. Restore the default polarity of the OPRXLOS signal and
5659 absent event ( bit 8) 5768 * this signal will then correctly indicate the presence or
5660 2. Restore the default polarity of the OPRXLOS signal and 5769 * absence of the Rx signal. (bit 9)
5661 this signal will then correctly indicate the presence or 5770 */
5662 absence of the Rx signal. (bit 9) */
5663 mod_abs |= (1<<8); 5771 mod_abs |= (1<<8);
5664 if (!(phy->flags & FLAGS_NOC)) 5772 if (!(phy->flags & FLAGS_NOC))
5665 mod_abs |= (1<<9); 5773 mod_abs |= (1<<9);
@@ -5667,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5667 MDIO_PMA_DEVAD, 5775 MDIO_PMA_DEVAD,
5668 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 5776 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
5669 5777
5670 /* Clear RX alarm since it stays up as long as 5778 /*
5671 the mod_abs wasn't changed. This is need to be done 5779 * Clear RX alarm since it stays up as long as the mod_abs
5672 before calling the module detection, otherwise it will clear 5780 * wasn't changed. This is need to be done before calling the
5673 the link update alarm */ 5781 * module detection, otherwise it will clear* the link update
5782 * alarm
5783 */
5674 bnx2x_cl45_read(bp, phy, 5784 bnx2x_cl45_read(bp, phy,
5675 MDIO_PMA_DEVAD, 5785 MDIO_PMA_DEVAD,
5676 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); 5786 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5678,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5678 5788
5679 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5789 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
5680 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 5790 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
5681 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); 5791 bnx2x_sfp_set_transmitter(params, phy, 0);
5682 5792
5683 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) 5793 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
5684 bnx2x_sfp_module_detection(phy, params); 5794 bnx2x_sfp_module_detection(phy, params);
@@ -5687,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5687 } 5797 }
5688 5798
5689 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", 5799 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
5690 rx_alarm_status); 5800 rx_alarm_status);
5691 /* No need to check link status in case of 5801 /* No need to check link status in case of module plugged in/out */
5692 module plugged in/out */
5693} 5802}
5694 5803
5695static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, 5804static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5725,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5725 bnx2x_cl45_read(bp, phy, 5834 bnx2x_cl45_read(bp, phy,
5726 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); 5835 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
5727 5836
5728 /** 5837 /*
5729 * If a module is present and there is need to check 5838 * If a module is present and there is need to check
5730 * for over current 5839 * for over current
5731 */ 5840 */
@@ -5745,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5745 " Please remove the SFP+ module and" 5854 " Please remove the SFP+ module and"
5746 " restart the system to clear this" 5855 " restart the system to clear this"
5747 " error.\n", 5856 " error.\n",
5748 params->port); 5857 params->port);
5749 5858 /* Disable all RX_ALARMs except for mod_abs */
5750 /*
5751 * Disable all RX_ALARMs except for
5752 * mod_abs
5753 */
5754 bnx2x_cl45_write(bp, phy, 5859 bnx2x_cl45_write(bp, phy,
5755 MDIO_PMA_DEVAD, 5860 MDIO_PMA_DEVAD,
5756 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5)); 5861 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5793,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5793 MDIO_PMA_DEVAD, 5898 MDIO_PMA_DEVAD,
5794 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); 5899 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
5795 5900
5796 /* Bits 0..2 --> speed detected, 5901 /*
5797 bits 13..15--> link is down */ 5902 * Bits 0..2 --> speed detected,
5903 * Bits 13..15--> link is down
5904 */
5798 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { 5905 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
5799 link_up = 1; 5906 link_up = 1;
5800 vars->line_speed = SPEED_10000; 5907 vars->line_speed = SPEED_10000;
5908 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
5909 params->port);
5801 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { 5910 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
5802 link_up = 1; 5911 link_up = 1;
5803 vars->line_speed = SPEED_1000; 5912 vars->line_speed = SPEED_1000;
@@ -5819,7 +5928,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5819 bnx2x_cl45_read(bp, phy, 5928 bnx2x_cl45_read(bp, phy,
5820 MDIO_PMA_DEVAD, 5929 MDIO_PMA_DEVAD,
5821 MDIO_PMA_REG_8727_PCS_GP, &val1); 5930 MDIO_PMA_REG_8727_PCS_GP, &val1);
5822 /** 5931 /*
5823 * In case of dual-media board and 1G, power up the XAUI side, 5932 * In case of dual-media board and 1G, power up the XAUI side,
5824 * otherwise power it down. For 10G it is done automatically 5933 * otherwise power it down. For 10G it is done automatically
5825 */ 5934 */
@@ -5839,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
5839{ 5948{
5840 struct bnx2x *bp = params->bp; 5949 struct bnx2x *bp = params->bp;
5841 /* Disable Transmitter */ 5950 /* Disable Transmitter */
5842 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); 5951 bnx2x_sfp_set_transmitter(params, phy, 0);
5843 /* Clear LASI */ 5952 /* Clear LASI */
5844 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0); 5953 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
5845 5954
@@ -5851,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
5851static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, 5960static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5852 struct link_params *params) 5961 struct link_params *params)
5853{ 5962{
5854 u16 val, fw_ver1, fw_ver2, cnt; 5963 u16 val, fw_ver1, fw_ver2, cnt, adj;
5855 struct bnx2x *bp = params->bp; 5964 struct bnx2x *bp = params->bp;
5856 5965
5966 adj = 0;
5967 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
5968 adj = -1;
5969
5857 /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ 5970 /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
5858 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 5971 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
5859 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); 5972 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
5860 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); 5973 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
5861 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); 5974 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
5862 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); 5975 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
5863 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); 5976 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
5864 5977
5865 for (cnt = 0; cnt < 100; cnt++) { 5978 for (cnt = 0; cnt < 100; cnt++) {
5866 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); 5979 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
5867 if (val & 1) 5980 if (val & 1)
5868 break; 5981 break;
5869 udelay(5); 5982 udelay(5);
@@ -5877,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5877 5990
5878 5991
5879 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ 5992 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
5880 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); 5993 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
5881 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); 5994 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
5882 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); 5995 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
5883 for (cnt = 0; cnt < 100; cnt++) { 5996 for (cnt = 0; cnt < 100; cnt++) {
5884 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); 5997 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
5885 if (val & 1) 5998 if (val & 1)
5886 break; 5999 break;
5887 udelay(5); 6000 udelay(5);
@@ -5894,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5894 } 6007 }
5895 6008
5896 /* lower 16 bits of the register SPI_FW_STATUS */ 6009 /* lower 16 bits of the register SPI_FW_STATUS */
5897 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); 6010 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
5898 /* upper 16 bits of register SPI_FW_STATUS */ 6011 /* upper 16 bits of register SPI_FW_STATUS */
5899 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); 6012 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
5900 6013
5901 bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1, 6014 bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
5902 phy->ver_addr); 6015 phy->ver_addr);
@@ -5905,49 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5905static void bnx2x_848xx_set_led(struct bnx2x *bp, 6018static void bnx2x_848xx_set_led(struct bnx2x *bp,
5906 struct bnx2x_phy *phy) 6019 struct bnx2x_phy *phy)
5907{ 6020{
5908 u16 val; 6021 u16 val, adj;
6022
6023 adj = 0;
6024 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
6025 adj = -1;
5909 6026
5910 /* PHYC_CTL_LED_CTL */ 6027 /* PHYC_CTL_LED_CTL */
5911 bnx2x_cl45_read(bp, phy, 6028 bnx2x_cl45_read(bp, phy,
5912 MDIO_PMA_DEVAD, 6029 MDIO_PMA_DEVAD,
5913 MDIO_PMA_REG_8481_LINK_SIGNAL, &val); 6030 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
5914 val &= 0xFE00; 6031 val &= 0xFE00;
5915 val |= 0x0092; 6032 val |= 0x0092;
5916 6033
5917 bnx2x_cl45_write(bp, phy, 6034 bnx2x_cl45_write(bp, phy,
5918 MDIO_PMA_DEVAD, 6035 MDIO_PMA_DEVAD,
5919 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 6036 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
5920 6037
5921 bnx2x_cl45_write(bp, phy, 6038 bnx2x_cl45_write(bp, phy,
5922 MDIO_PMA_DEVAD, 6039 MDIO_PMA_DEVAD,
5923 MDIO_PMA_REG_8481_LED1_MASK, 6040 MDIO_PMA_REG_8481_LED1_MASK + adj,
5924 0x80); 6041 0x80);
5925 6042
5926 bnx2x_cl45_write(bp, phy, 6043 bnx2x_cl45_write(bp, phy,
5927 MDIO_PMA_DEVAD, 6044 MDIO_PMA_DEVAD,
5928 MDIO_PMA_REG_8481_LED2_MASK, 6045 MDIO_PMA_REG_8481_LED2_MASK + adj,
5929 0x18); 6046 0x18);
5930 6047
5931 /* Select activity source by Tx and Rx, as suggested by PHY AE */ 6048 /* Select activity source by Tx and Rx, as suggested by PHY AE */
5932 bnx2x_cl45_write(bp, phy, 6049 bnx2x_cl45_write(bp, phy,
5933 MDIO_PMA_DEVAD, 6050 MDIO_PMA_DEVAD,
5934 MDIO_PMA_REG_8481_LED3_MASK, 6051 MDIO_PMA_REG_8481_LED3_MASK + adj,
5935 0x0006); 6052 0x0006);
5936 6053
5937 /* Select the closest activity blink rate to that in 10/100/1000 */ 6054 /* Select the closest activity blink rate to that in 10/100/1000 */
5938 bnx2x_cl45_write(bp, phy, 6055 bnx2x_cl45_write(bp, phy,
5939 MDIO_PMA_DEVAD, 6056 MDIO_PMA_DEVAD,
5940 MDIO_PMA_REG_8481_LED3_BLINK, 6057 MDIO_PMA_REG_8481_LED3_BLINK + adj,
5941 0); 6058 0);
5942 6059
5943 bnx2x_cl45_read(bp, phy, 6060 bnx2x_cl45_read(bp, phy,
5944 MDIO_PMA_DEVAD, 6061 MDIO_PMA_DEVAD,
5945 MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); 6062 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
5946 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ 6063 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
5947 6064
5948 bnx2x_cl45_write(bp, phy, 6065 bnx2x_cl45_write(bp, phy,
5949 MDIO_PMA_DEVAD, 6066 MDIO_PMA_DEVAD,
5950 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); 6067 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
5951 6068
5952 /* 'Interrupt Mask' */ 6069 /* 'Interrupt Mask' */
5953 bnx2x_cl45_write(bp, phy, 6070 bnx2x_cl45_write(bp, phy,
@@ -5961,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
5961{ 6078{
5962 struct bnx2x *bp = params->bp; 6079 struct bnx2x *bp = params->bp;
5963 u16 autoneg_val, an_1000_val, an_10_100_val; 6080 u16 autoneg_val, an_1000_val, an_10_100_val;
5964 6081 /*
6082 * This phy uses the NIG latch mechanism since link indication
6083 * arrives through its LED4 and not via its LASI signal, so we
6084 * get steady signal instead of clear on read
6085 */
5965 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 6086 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
5966 1 << NIG_LATCH_BC_ENABLE_MI_INT); 6087 1 << NIG_LATCH_BC_ENABLE_MI_INT);
5967 6088
@@ -6086,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
6086 struct bnx2x *bp = params->bp; 6207 struct bnx2x *bp = params->bp;
6087 /* Restore normal power mode*/ 6208 /* Restore normal power mode*/
6088 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 6209 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6089 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 6210 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
6090 6211
6091 /* HW reset */ 6212 /* HW reset */
6092 bnx2x_ext_phy_hw_reset(bp, params->port); 6213 bnx2x_ext_phy_hw_reset(bp, params->port);
6093 bnx2x_wait_reset_complete(bp, phy); 6214 bnx2x_wait_reset_complete(bp, phy, params);
6094 6215
6095 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 6216 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
6096 return bnx2x_848xx_cmn_config_init(phy, params, vars); 6217 return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6102,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6102{ 6223{
6103 struct bnx2x *bp = params->bp; 6224 struct bnx2x *bp = params->bp;
6104 u8 port, initialize = 1; 6225 u8 port, initialize = 1;
6105 u16 val; 6226 u16 val, adj;
6106 u16 temp; 6227 u16 temp;
6107 u32 actual_phy_selection; 6228 u32 actual_phy_selection, cms_enable;
6108 u8 rc = 0; 6229 u8 rc = 0;
6109 6230
6110 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */ 6231 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
6232 adj = 0;
6233 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
6234 adj = 3;
6111 6235
6112 msleep(1); 6236 msleep(1);
6113 if (CHIP_IS_E2(bp)) 6237 if (CHIP_IS_E2(bp))
@@ -6117,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6117 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 6241 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6118 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 6242 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
6119 port); 6243 port);
6120 bnx2x_wait_reset_complete(bp, phy); 6244 bnx2x_wait_reset_complete(bp, phy, params);
6121 /* Wait for GPHY to come out of reset */ 6245 /* Wait for GPHY to come out of reset */
6122 msleep(50); 6246 msleep(50);
6123 /* BCM84823 requires that XGXS links up first @ 10G for normal 6247 /*
6124 behavior */ 6248 * BCM84823 requires that XGXS links up first @ 10G for normal behavior
6249 */
6125 temp = vars->line_speed; 6250 temp = vars->line_speed;
6126 vars->line_speed = SPEED_10000; 6251 vars->line_speed = SPEED_10000;
6127 bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0); 6252 bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6131,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6131 /* Set dual-media configuration according to configuration */ 6256 /* Set dual-media configuration according to configuration */
6132 6257
6133 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 6258 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
6134 MDIO_CTL_REG_84823_MEDIA, &val); 6259 MDIO_CTL_REG_84823_MEDIA + adj, &val);
6135 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | 6260 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
6136 MDIO_CTL_REG_84823_MEDIA_LINE_MASK | 6261 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
6137 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | 6262 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6164,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6164 val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; 6289 val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
6165 6290
6166 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 6291 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
6167 MDIO_CTL_REG_84823_MEDIA, val); 6292 MDIO_CTL_REG_84823_MEDIA + adj, val);
6168 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", 6293 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
6169 params->multi_phy_config, val); 6294 params->multi_phy_config, val);
6170 6295
@@ -6172,23 +6297,43 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6172 rc = bnx2x_848xx_cmn_config_init(phy, params, vars); 6297 rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
6173 else 6298 else
6174 bnx2x_save_848xx_spirom_version(phy, params); 6299 bnx2x_save_848xx_spirom_version(phy, params);
6300 cms_enable = REG_RD(bp, params->shmem_base +
6301 offsetof(struct shmem_region,
6302 dev_info.port_hw_config[params->port].default_cfg)) &
6303 PORT_HW_CFG_ENABLE_CMS_MASK;
6304
6305 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
6306 MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
6307 if (cms_enable)
6308 val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
6309 else
6310 val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
6311 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
6312 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
6313
6314
6175 return rc; 6315 return rc;
6176} 6316}
6177 6317
6178static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, 6318static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
6179 struct link_params *params, 6319 struct link_params *params,
6180 struct link_vars *vars) 6320 struct link_vars *vars)
6181{ 6321{
6182 struct bnx2x *bp = params->bp; 6322 struct bnx2x *bp = params->bp;
6183 u16 val, val1, val2; 6323 u16 val, val1, val2, adj;
6184 u8 link_up = 0; 6324 u8 link_up = 0;
6185 6325
6326 /* Reg offset adjustment for 84833 */
6327 adj = 0;
6328 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
6329 adj = -1;
6330
6186 /* Check 10G-BaseT link status */ 6331 /* Check 10G-BaseT link status */
6187 /* Check PMD signal ok */ 6332 /* Check PMD signal ok */
6188 bnx2x_cl45_read(bp, phy, 6333 bnx2x_cl45_read(bp, phy,
6189 MDIO_AN_DEVAD, 0xFFFA, &val1); 6334 MDIO_AN_DEVAD, 0xFFFA, &val1);
6190 bnx2x_cl45_read(bp, phy, 6335 bnx2x_cl45_read(bp, phy,
6191 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, 6336 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
6192 &val2); 6337 &val2);
6193 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); 6338 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
6194 6339
@@ -6273,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
6273 struct link_params *params) 6418 struct link_params *params)
6274{ 6419{
6275 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6420 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6276 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); 6421 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
6277 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6422 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6278 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); 6423 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
6279} 6424}
6280 6425
6281static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, 6426static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6297,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
6297 else 6442 else
6298 port = params->port; 6443 port = params->port;
6299 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 6444 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6300 MISC_REGISTERS_GPIO_OUTPUT_LOW, 6445 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6301 port); 6446 port);
6302} 6447}
6303 6448
6304static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 6449static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6353,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6353 6498
6354 /* Set LED masks */ 6499 /* Set LED masks */
6355 bnx2x_cl45_write(bp, phy, 6500 bnx2x_cl45_write(bp, phy,
6356 MDIO_PMA_DEVAD, 6501 MDIO_PMA_DEVAD,
6357 MDIO_PMA_REG_8481_LED1_MASK, 6502 MDIO_PMA_REG_8481_LED1_MASK,
6358 0x0); 6503 0x0);
6359 6504
6360 bnx2x_cl45_write(bp, phy, 6505 bnx2x_cl45_write(bp, phy,
6361 MDIO_PMA_DEVAD, 6506 MDIO_PMA_DEVAD,
6362 MDIO_PMA_REG_8481_LED2_MASK, 6507 MDIO_PMA_REG_8481_LED2_MASK,
6363 0x0); 6508 0x0);
6364 6509
6365 bnx2x_cl45_write(bp, phy, 6510 bnx2x_cl45_write(bp, phy,
6366 MDIO_PMA_DEVAD, 6511 MDIO_PMA_DEVAD,
6367 MDIO_PMA_REG_8481_LED3_MASK, 6512 MDIO_PMA_REG_8481_LED3_MASK,
6368 0x0); 6513 0x0);
6369 6514
6370 bnx2x_cl45_write(bp, phy, 6515 bnx2x_cl45_write(bp, phy,
6371 MDIO_PMA_DEVAD, 6516 MDIO_PMA_DEVAD,
6372 MDIO_PMA_REG_8481_LED5_MASK, 6517 MDIO_PMA_REG_8481_LED5_MASK,
6373 0x20); 6518 0x20);
6374 6519
6375 } else { 6520 } else {
6376 bnx2x_cl45_write(bp, phy, 6521 bnx2x_cl45_write(bp, phy,
@@ -6394,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6394 val |= 0x2492; 6539 val |= 0x2492;
6395 6540
6396 bnx2x_cl45_write(bp, phy, 6541 bnx2x_cl45_write(bp, phy,
6397 MDIO_PMA_DEVAD, 6542 MDIO_PMA_DEVAD,
6398 MDIO_PMA_REG_8481_LINK_SIGNAL, 6543 MDIO_PMA_REG_8481_LINK_SIGNAL,
6399 val); 6544 val);
6400 6545
6401 /* Set LED masks */ 6546 /* Set LED masks */
6402 bnx2x_cl45_write(bp, phy, 6547 bnx2x_cl45_write(bp, phy,
6403 MDIO_PMA_DEVAD, 6548 MDIO_PMA_DEVAD,
6404 MDIO_PMA_REG_8481_LED1_MASK, 6549 MDIO_PMA_REG_8481_LED1_MASK,
6405 0x0); 6550 0x0);
6406 6551
6407 bnx2x_cl45_write(bp, phy, 6552 bnx2x_cl45_write(bp, phy,
6408 MDIO_PMA_DEVAD, 6553 MDIO_PMA_DEVAD,
6409 MDIO_PMA_REG_8481_LED2_MASK, 6554 MDIO_PMA_REG_8481_LED2_MASK,
6410 0x20); 6555 0x20);
6411 6556
6412 bnx2x_cl45_write(bp, phy, 6557 bnx2x_cl45_write(bp, phy,
6413 MDIO_PMA_DEVAD, 6558 MDIO_PMA_DEVAD,
6414 MDIO_PMA_REG_8481_LED3_MASK, 6559 MDIO_PMA_REG_8481_LED3_MASK,
6415 0x20); 6560 0x20);
6416 6561
6417 bnx2x_cl45_write(bp, phy, 6562 bnx2x_cl45_write(bp, phy,
6418 MDIO_PMA_DEVAD, 6563 MDIO_PMA_DEVAD,
6419 MDIO_PMA_REG_8481_LED5_MASK, 6564 MDIO_PMA_REG_8481_LED5_MASK,
6420 0x0); 6565 0x0);
6421 } else { 6566 } else {
6422 bnx2x_cl45_write(bp, phy, 6567 bnx2x_cl45_write(bp, phy,
6423 MDIO_PMA_DEVAD, 6568 MDIO_PMA_DEVAD,
6424 MDIO_PMA_REG_8481_LED1_MASK, 6569 MDIO_PMA_REG_8481_LED1_MASK,
6425 0x20); 6570 0x20);
6426 } 6571 }
6427 break; 6572 break;
6428 6573
@@ -6440,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6440 &val); 6585 &val);
6441 6586
6442 if (!((val & 6587 if (!((val &
6443 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) 6588 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
6444 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){ 6589 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
6445 DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n"); 6590 DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
6446 bnx2x_cl45_write(bp, phy, 6591 bnx2x_cl45_write(bp, phy,
6447 MDIO_PMA_DEVAD, 6592 MDIO_PMA_DEVAD,
6448 MDIO_PMA_REG_8481_LINK_SIGNAL, 6593 MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6451,24 +6596,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6451 6596
6452 /* Set LED masks */ 6597 /* Set LED masks */
6453 bnx2x_cl45_write(bp, phy, 6598 bnx2x_cl45_write(bp, phy,
6454 MDIO_PMA_DEVAD, 6599 MDIO_PMA_DEVAD,
6455 MDIO_PMA_REG_8481_LED1_MASK, 6600 MDIO_PMA_REG_8481_LED1_MASK,
6456 0x10); 6601 0x10);
6457 6602
6458 bnx2x_cl45_write(bp, phy, 6603 bnx2x_cl45_write(bp, phy,
6459 MDIO_PMA_DEVAD, 6604 MDIO_PMA_DEVAD,
6460 MDIO_PMA_REG_8481_LED2_MASK, 6605 MDIO_PMA_REG_8481_LED2_MASK,
6461 0x80); 6606 0x80);
6462 6607
6463 bnx2x_cl45_write(bp, phy, 6608 bnx2x_cl45_write(bp, phy,
6464 MDIO_PMA_DEVAD, 6609 MDIO_PMA_DEVAD,
6465 MDIO_PMA_REG_8481_LED3_MASK, 6610 MDIO_PMA_REG_8481_LED3_MASK,
6466 0x98); 6611 0x98);
6467 6612
6468 bnx2x_cl45_write(bp, phy, 6613 bnx2x_cl45_write(bp, phy,
6469 MDIO_PMA_DEVAD, 6614 MDIO_PMA_DEVAD,
6470 MDIO_PMA_REG_8481_LED5_MASK, 6615 MDIO_PMA_REG_8481_LED5_MASK,
6471 0x40); 6616 0x40);
6472 6617
6473 } else { 6618 } else {
6474 bnx2x_cl45_write(bp, phy, 6619 bnx2x_cl45_write(bp, phy,
@@ -6513,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
6513 6658
6514 /* Restore normal power mode*/ 6659 /* Restore normal power mode*/
6515 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 6660 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6516 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 6661 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
6517 /* HW reset */ 6662 /* HW reset */
6518 bnx2x_ext_phy_hw_reset(bp, params->port); 6663 bnx2x_ext_phy_hw_reset(bp, params->port);
6519 bnx2x_wait_reset_complete(bp, phy); 6664 bnx2x_wait_reset_complete(bp, phy, params);
6520 6665
6521 bnx2x_cl45_write(bp, phy, 6666 bnx2x_cl45_write(bp, phy,
6522 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1); 6667 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6563,9 +6708,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
6563 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", 6708 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
6564 val2, val1); 6709 val2, val1);
6565 link_up = ((val1 & 4) == 4); 6710 link_up = ((val1 & 4) == 4);
6566 /* if link is up 6711 /* if link is up print the AN outcome of the SFX7101 PHY */
6567 * print the AN outcome of the SFX7101 PHY
6568 */
6569 if (link_up) { 6712 if (link_up) {
6570 bnx2x_cl45_read(bp, phy, 6713 bnx2x_cl45_read(bp, phy,
6571 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, 6714 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -6599,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
6599 u16 val, cnt; 6742 u16 val, cnt;
6600 6743
6601 bnx2x_cl45_read(bp, phy, 6744 bnx2x_cl45_read(bp, phy,
6602 MDIO_PMA_DEVAD, 6745 MDIO_PMA_DEVAD,
6603 MDIO_PMA_REG_7101_RESET, &val); 6746 MDIO_PMA_REG_7101_RESET, &val);
6604 6747
6605 for (cnt = 0; cnt < 10; cnt++) { 6748 for (cnt = 0; cnt < 10; cnt++) {
6606 msleep(50); 6749 msleep(50);
6607 /* Writes a self-clearing reset */ 6750 /* Writes a self-clearing reset */
6608 bnx2x_cl45_write(bp, phy, 6751 bnx2x_cl45_write(bp, phy,
6609 MDIO_PMA_DEVAD, 6752 MDIO_PMA_DEVAD,
6610 MDIO_PMA_REG_7101_RESET, 6753 MDIO_PMA_REG_7101_RESET,
6611 (val | (1<<15))); 6754 (val | (1<<15)));
6612 /* Wait for clear */ 6755 /* Wait for clear */
6613 bnx2x_cl45_read(bp, phy, 6756 bnx2x_cl45_read(bp, phy,
6614 MDIO_PMA_DEVAD, 6757 MDIO_PMA_DEVAD,
6615 MDIO_PMA_REG_7101_RESET, &val); 6758 MDIO_PMA_REG_7101_RESET, &val);
6616 6759
6617 if ((val & (1<<15)) == 0) 6760 if ((val & (1<<15)) == 0)
6618 break; 6761 break;
@@ -6623,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
6623 struct link_params *params) { 6766 struct link_params *params) {
6624 /* Low power mode is controlled by GPIO 2 */ 6767 /* Low power mode is controlled by GPIO 2 */
6625 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, 6768 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
6626 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); 6769 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
6627 /* The PHY reset is controlled by GPIO 1 */ 6770 /* The PHY reset is controlled by GPIO 1 */
6628 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6771 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6629 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); 6772 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
6630} 6773}
6631 6774
6632static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, 6775static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6668,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
6668 .supported = 0, 6811 .supported = 0,
6669 .media_type = ETH_PHY_NOT_PRESENT, 6812 .media_type = ETH_PHY_NOT_PRESENT,
6670 .ver_addr = 0, 6813 .ver_addr = 0,
6671 .req_flow_ctrl = 0, 6814 .req_flow_ctrl = 0,
6672 .req_line_speed = 0, 6815 .req_line_speed = 0,
6673 .speed_cap_mask = 0, 6816 .speed_cap_mask = 0,
6674 .req_duplex = 0, 6817 .req_duplex = 0,
6675 .rsrv = 0, 6818 .rsrv = 0,
6676 .config_init = (config_init_t)NULL, 6819 .config_init = (config_init_t)NULL,
@@ -6705,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
6705 .media_type = ETH_PHY_UNSPECIFIED, 6848 .media_type = ETH_PHY_UNSPECIFIED,
6706 .ver_addr = 0, 6849 .ver_addr = 0,
6707 .req_flow_ctrl = 0, 6850 .req_flow_ctrl = 0,
6708 .req_line_speed = 0, 6851 .req_line_speed = 0,
6709 .speed_cap_mask = 0, 6852 .speed_cap_mask = 0,
6710 .req_duplex = 0, 6853 .req_duplex = 0,
6711 .rsrv = 0, 6854 .rsrv = 0,
6712 .config_init = (config_init_t)bnx2x_init_serdes, 6855 .config_init = (config_init_t)bnx2x_init_serdes,
@@ -6742,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
6742 .media_type = ETH_PHY_UNSPECIFIED, 6885 .media_type = ETH_PHY_UNSPECIFIED,
6743 .ver_addr = 0, 6886 .ver_addr = 0,
6744 .req_flow_ctrl = 0, 6887 .req_flow_ctrl = 0,
6745 .req_line_speed = 0, 6888 .req_line_speed = 0,
6746 .speed_cap_mask = 0, 6889 .speed_cap_mask = 0,
6747 .req_duplex = 0, 6890 .req_duplex = 0,
6748 .rsrv = 0, 6891 .rsrv = 0,
6749 .config_init = (config_init_t)bnx2x_init_xgxs, 6892 .config_init = (config_init_t)bnx2x_init_xgxs,
@@ -6773,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
6773 .media_type = ETH_PHY_BASE_T, 6916 .media_type = ETH_PHY_BASE_T,
6774 .ver_addr = 0, 6917 .ver_addr = 0,
6775 .req_flow_ctrl = 0, 6918 .req_flow_ctrl = 0,
6776 .req_line_speed = 0, 6919 .req_line_speed = 0,
6777 .speed_cap_mask = 0, 6920 .speed_cap_mask = 0,
6778 .req_duplex = 0, 6921 .req_duplex = 0,
6779 .rsrv = 0, 6922 .rsrv = 0,
6780 .config_init = (config_init_t)bnx2x_7101_config_init, 6923 .config_init = (config_init_t)bnx2x_7101_config_init,
@@ -6804,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
6804 SUPPORTED_Asym_Pause), 6947 SUPPORTED_Asym_Pause),
6805 .media_type = ETH_PHY_UNSPECIFIED, 6948 .media_type = ETH_PHY_UNSPECIFIED,
6806 .ver_addr = 0, 6949 .ver_addr = 0,
6807 .req_flow_ctrl = 0, 6950 .req_flow_ctrl = 0,
6808 .req_line_speed = 0, 6951 .req_line_speed = 0,
6809 .speed_cap_mask = 0, 6952 .speed_cap_mask = 0,
6810 .req_duplex = 0, 6953 .req_duplex = 0,
6811 .rsrv = 0, 6954 .rsrv = 0,
6812 .config_init = (config_init_t)bnx2x_8073_config_init, 6955 .config_init = (config_init_t)bnx2x_8073_config_init,
@@ -7015,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
7015 .phy_specific_func = (phy_specific_func_t)NULL 7158 .phy_specific_func = (phy_specific_func_t)NULL
7016}; 7159};
7017 7160
7161static struct bnx2x_phy phy_84833 = {
7162 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
7163 .addr = 0xff,
7164 .flags = FLAGS_FAN_FAILURE_DET_REQ |
7165 FLAGS_REARM_LATCH_SIGNAL,
7166 .def_md_devad = 0,
7167 .reserved = 0,
7168 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7169 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7170 .mdio_ctrl = 0,
7171 .supported = (SUPPORTED_10baseT_Half |
7172 SUPPORTED_10baseT_Full |
7173 SUPPORTED_100baseT_Half |
7174 SUPPORTED_100baseT_Full |
7175 SUPPORTED_1000baseT_Full |
7176 SUPPORTED_10000baseT_Full |
7177 SUPPORTED_TP |
7178 SUPPORTED_Autoneg |
7179 SUPPORTED_Pause |
7180 SUPPORTED_Asym_Pause),
7181 .media_type = ETH_PHY_BASE_T,
7182 .ver_addr = 0,
7183 .req_flow_ctrl = 0,
7184 .req_line_speed = 0,
7185 .speed_cap_mask = 0,
7186 .req_duplex = 0,
7187 .rsrv = 0,
7188 .config_init = (config_init_t)bnx2x_848x3_config_init,
7189 .read_status = (read_status_t)bnx2x_848xx_read_status,
7190 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
7191 .config_loopback = (config_loopback_t)NULL,
7192 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
7193 .hw_reset = (hw_reset_t)NULL,
7194 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
7195 .phy_specific_func = (phy_specific_func_t)NULL
7196};
7197
7018/*****************************************************************/ 7198/*****************************************************************/
7019/* */ 7199/* */
7020/* Populate the phy according. Main function: bnx2x_populate_phy */ 7200/* Populate the phy according. Main function: bnx2x_populate_phy */
@@ -7028,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
7028 /* Get the 4 lanes xgxs config rx and tx */ 7208 /* Get the 4 lanes xgxs config rx and tx */
7029 u32 rx = 0, tx = 0, i; 7209 u32 rx = 0, tx = 0, i;
7030 for (i = 0; i < 2; i++) { 7210 for (i = 0; i < 2; i++) {
7031 /** 7211 /*
7032 * INT_PHY and EXT_PHY1 share the same value location in the 7212 * INT_PHY and EXT_PHY1 share the same value location in the
7033 * shmem. When num_phys is greater than 1, than this value 7213 * shmem. When num_phys is greater than 1, than this value
7034 * applies only to EXT_PHY1 7214 * applies only to EXT_PHY1
@@ -7036,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
7036 if (phy_index == INT_PHY || phy_index == EXT_PHY1) { 7216 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
7037 rx = REG_RD(bp, shmem_base + 7217 rx = REG_RD(bp, shmem_base +
7038 offsetof(struct shmem_region, 7218 offsetof(struct shmem_region,
7039 dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); 7219 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
7040 7220
7041 tx = REG_RD(bp, shmem_base + 7221 tx = REG_RD(bp, shmem_base +
7042 offsetof(struct shmem_region, 7222 offsetof(struct shmem_region,
7043 dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); 7223 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
7044 } else { 7224 } else {
7045 rx = REG_RD(bp, shmem_base + 7225 rx = REG_RD(bp, shmem_base +
7046 offsetof(struct shmem_region, 7226 offsetof(struct shmem_region,
7047 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); 7227 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
7048 7228
7049 tx = REG_RD(bp, shmem_base + 7229 tx = REG_RD(bp, shmem_base +
7050 offsetof(struct shmem_region, 7230 offsetof(struct shmem_region,
7051 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); 7231 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
7052 } 7232 }
7053 7233
7054 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff); 7234 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7168,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7168 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: 7348 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
7169 *phy = phy_84823; 7349 *phy = phy_84823;
7170 break; 7350 break;
7351 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
7352 *phy = phy_84833;
7353 break;
7171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 7354 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7172 *phy = phy_7101; 7355 *phy = phy_7101;
7173 break; 7356 break;
@@ -7182,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7182 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); 7365 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
7183 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); 7366 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
7184 7367
7185 /** 7368 /*
7186 * The shmem address of the phy version is located on different 7369 * The shmem address of the phy version is located on different
7187 * structures. In case this structure is too old, do not set 7370 * structures. In case this structure is too old, do not set
7188 * the address 7371 * the address
7189 */ 7372 */
7190 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, 7373 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
7191 dev_info.shared_hw_config.config2)); 7374 dev_info.shared_hw_config.config2));
7192 if (phy_index == EXT_PHY1) { 7375 if (phy_index == EXT_PHY1) {
7193 phy->ver_addr = shmem_base + offsetof(struct shmem_region, 7376 phy->ver_addr = shmem_base + offsetof(struct shmem_region,
7194 port_mb[port].ext_phy_fw_version); 7377 port_mb[port].ext_phy_fw_version);
7195 7378
7196 /* Check specific mdc mdio settings */ 7379 /* Check specific mdc mdio settings */
7197 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) 7380 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
7198 mdc_mdio_access = config2 & 7381 mdc_mdio_access = config2 &
7199 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; 7382 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
7200 } else { 7383 } else {
7201 u32 size = REG_RD(bp, shmem2_base); 7384 u32 size = REG_RD(bp, shmem2_base);
7202 7385
@@ -7215,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7215 } 7398 }
7216 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); 7399 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
7217 7400
7218 /** 7401 /*
7219 * In case mdc/mdio_access of the external phy is different than the 7402 * In case mdc/mdio_access of the external phy is different than the
7220 * mdc/mdio access of the XGXS, a HW lock must be taken in each access 7403 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
7221 * to prevent one port interfere with another port's CL45 operations. 7404 * to prevent one port interfere with another port's CL45 operations.
@@ -7250,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
7250 /* Populate the default phy configuration for MF mode */ 7433 /* Populate the default phy configuration for MF mode */
7251 if (phy_index == EXT_PHY2) { 7434 if (phy_index == EXT_PHY2) {
7252 link_config = REG_RD(bp, params->shmem_base + 7435 link_config = REG_RD(bp, params->shmem_base +
7253 offsetof(struct shmem_region, dev_info. 7436 offsetof(struct shmem_region, dev_info.
7254 port_feature_config[params->port].link_config2)); 7437 port_feature_config[params->port].link_config2));
7255 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 7438 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7256 offsetof(struct shmem_region, dev_info. 7439 offsetof(struct shmem_region,
7440 dev_info.
7257 port_hw_config[params->port].speed_capability_mask2)); 7441 port_hw_config[params->port].speed_capability_mask2));
7258 } else { 7442 } else {
7259 link_config = REG_RD(bp, params->shmem_base + 7443 link_config = REG_RD(bp, params->shmem_base +
7260 offsetof(struct shmem_region, dev_info. 7444 offsetof(struct shmem_region, dev_info.
7261 port_feature_config[params->port].link_config)); 7445 port_feature_config[params->port].link_config));
7262 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 7446 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7263 offsetof(struct shmem_region, dev_info. 7447 offsetof(struct shmem_region,
7264 port_hw_config[params->port].speed_capability_mask)); 7448 dev_info.
7449 port_hw_config[params->port].speed_capability_mask));
7265 } 7450 }
7266 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask" 7451 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
7267 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask); 7452 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7408,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
7408 else if (phy_index == EXT_PHY2) 7593 else if (phy_index == EXT_PHY2)
7409 actual_phy_idx = EXT_PHY1; 7594 actual_phy_idx = EXT_PHY1;
7410 } 7595 }
7411 params->phy[actual_phy_idx].req_flow_ctrl = 7596 params->phy[actual_phy_idx].req_flow_ctrl =
7412 params->req_flow_ctrl[link_cfg_idx]; 7597 params->req_flow_ctrl[link_cfg_idx];
7413 7598
7414 params->phy[actual_phy_idx].req_line_speed = 7599 params->phy[actual_phy_idx].req_line_speed =
@@ -7461,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7461 set_phy_vars(params); 7646 set_phy_vars(params);
7462 7647
7463 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); 7648 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
7464 if (CHIP_REV_IS_FPGA(bp)) {
7465
7466 vars->link_up = 1;
7467 vars->line_speed = SPEED_10000;
7468 vars->duplex = DUPLEX_FULL;
7469 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7470 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
7471 /* enable on E1.5 FPGA */
7472 if (CHIP_IS_E1H(bp)) {
7473 vars->flow_ctrl |=
7474 (BNX2X_FLOW_CTRL_TX |
7475 BNX2X_FLOW_CTRL_RX);
7476 vars->link_status |=
7477 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
7478 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
7479 }
7480
7481 bnx2x_emac_enable(params, vars, 0);
7482 if (!(CHIP_IS_E2(bp)))
7483 bnx2x_pbf_update(params, vars->flow_ctrl,
7484 vars->line_speed);
7485 /* disable drain */
7486 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7487
7488 /* update shared memory */
7489 bnx2x_update_mng(params, vars->link_status);
7490
7491 return 0;
7492
7493 } else
7494 if (CHIP_REV_IS_EMUL(bp)) {
7495
7496 vars->link_up = 1;
7497 vars->line_speed = SPEED_10000;
7498 vars->duplex = DUPLEX_FULL;
7499 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7500 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
7501
7502 bnx2x_bmac_enable(params, vars, 0);
7503
7504 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
7505 /* Disable drain */
7506 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
7507 + params->port*4, 0);
7508
7509 /* update shared memory */
7510 bnx2x_update_mng(params, vars->link_status);
7511
7512 return 0;
7513
7514 } else
7515 if (params->loopback_mode == LOOPBACK_BMAC) { 7649 if (params->loopback_mode == LOOPBACK_BMAC) {
7516 7650
7517 vars->link_up = 1; 7651 vars->link_up = 1;
@@ -7527,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7527 /* set bmac loopback */ 7661 /* set bmac loopback */
7528 bnx2x_bmac_enable(params, vars, 1); 7662 bnx2x_bmac_enable(params, vars, 1);
7529 7663
7530 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7664 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7531 params->port*4, 0);
7532 7665
7533 } else if (params->loopback_mode == LOOPBACK_EMAC) { 7666 } else if (params->loopback_mode == LOOPBACK_EMAC) {
7534 7667
@@ -7544,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7544 /* set bmac loopback */ 7677 /* set bmac loopback */
7545 bnx2x_emac_enable(params, vars, 1); 7678 bnx2x_emac_enable(params, vars, 1);
7546 bnx2x_emac_program(params, vars); 7679 bnx2x_emac_program(params, vars);
7547 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7680 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7548 params->port*4, 0);
7549 7681
7550 } else if ((params->loopback_mode == LOOPBACK_XGXS) || 7682 } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
7551 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 7683 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7568,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7568 bnx2x_emac_program(params, vars); 7700 bnx2x_emac_program(params, vars);
7569 bnx2x_emac_enable(params, vars, 0); 7701 bnx2x_emac_enable(params, vars, 0);
7570 } else 7702 } else
7571 bnx2x_bmac_enable(params, vars, 0); 7703 bnx2x_bmac_enable(params, vars, 0);
7572
7573 if (params->loopback_mode == LOOPBACK_XGXS) { 7704 if (params->loopback_mode == LOOPBACK_XGXS) {
7574 /* set 10G XGXS loopback */ 7705 /* set 10G XGXS loopback */
7575 params->phy[INT_PHY].config_loopback( 7706 params->phy[INT_PHY].config_loopback(
@@ -7587,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7587 params); 7718 params);
7588 } 7719 }
7589 } 7720 }
7590 7721 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7591 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
7592 params->port*4, 0);
7593 7722
7594 bnx2x_set_led(params, vars, 7723 bnx2x_set_led(params, vars,
7595 LED_MODE_OPER, vars->line_speed); 7724 LED_MODE_OPER, vars->line_speed);
@@ -7608,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7608 return 0; 7737 return 0;
7609} 7738}
7610u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, 7739u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7611 u8 reset_ext_phy) 7740 u8 reset_ext_phy)
7612{ 7741{
7613 struct bnx2x *bp = params->bp; 7742 struct bnx2x *bp = params->bp;
7614 u8 phy_index, port = params->port, clear_latch_ind = 0; 7743 u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7617,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7617 vars->link_status = 0; 7746 vars->link_status = 0;
7618 bnx2x_update_mng(params, vars->link_status); 7747 bnx2x_update_mng(params, vars->link_status);
7619 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7748 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
7620 (NIG_MASK_XGXS0_LINK_STATUS | 7749 (NIG_MASK_XGXS0_LINK_STATUS |
7621 NIG_MASK_XGXS0_LINK10G | 7750 NIG_MASK_XGXS0_LINK10G |
7622 NIG_MASK_SERDES0_LINK_STATUS | 7751 NIG_MASK_SERDES0_LINK_STATUS |
7623 NIG_MASK_MI_INT)); 7752 NIG_MASK_MI_INT));
7624 7753
7625 /* activate nig drain */ 7754 /* activate nig drain */
7626 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 7755 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7719,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7719 /* disable attentions */ 7848 /* disable attentions */
7720 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + 7849 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7721 port_of_path*4, 7850 port_of_path*4,
7722 (NIG_MASK_XGXS0_LINK_STATUS | 7851 (NIG_MASK_XGXS0_LINK_STATUS |
7723 NIG_MASK_XGXS0_LINK10G | 7852 NIG_MASK_XGXS0_LINK10G |
7724 NIG_MASK_SERDES0_LINK_STATUS | 7853 NIG_MASK_SERDES0_LINK_STATUS |
7725 NIG_MASK_MI_INT)); 7854 NIG_MASK_MI_INT));
7726 7855
7727 /* Need to take the phy out of low power mode in order 7856 /* Need to take the phy out of low power mode in order
7728 to write to access its registers */ 7857 to write to access its registers */
7729 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7858 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7730 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 7859 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
7860 port);
7731 7861
7732 /* Reset the phy */ 7862 /* Reset the phy */
7733 bnx2x_cl45_write(bp, &phy[port], 7863 bnx2x_cl45_write(bp, &phy[port],
7734 MDIO_PMA_DEVAD, 7864 MDIO_PMA_DEVAD,
7735 MDIO_PMA_REG_CTRL, 7865 MDIO_PMA_REG_CTRL,
7736 1<<15); 7866 1<<15);
7737 } 7867 }
7738 7868
7739 /* Add delay of 150ms after reset */ 7869 /* Add delay of 150ms after reset */
@@ -7762,18 +7892,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7762 7892
7763 /* Only set bit 10 = 1 (Tx power down) */ 7893 /* Only set bit 10 = 1 (Tx power down) */
7764 bnx2x_cl45_read(bp, phy_blk[port], 7894 bnx2x_cl45_read(bp, phy_blk[port],
7765 MDIO_PMA_DEVAD, 7895 MDIO_PMA_DEVAD,
7766 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7896 MDIO_PMA_REG_TX_POWER_DOWN, &val);
7767 7897
7768 /* Phase1 of TX_POWER_DOWN reset */ 7898 /* Phase1 of TX_POWER_DOWN reset */
7769 bnx2x_cl45_write(bp, phy_blk[port], 7899 bnx2x_cl45_write(bp, phy_blk[port],
7770 MDIO_PMA_DEVAD, 7900 MDIO_PMA_DEVAD,
7771 MDIO_PMA_REG_TX_POWER_DOWN, 7901 MDIO_PMA_REG_TX_POWER_DOWN,
7772 (val | 1<<10)); 7902 (val | 1<<10));
7773 } 7903 }
7774 7904
7775 /* Toggle Transmitter: Power down and then up with 600ms 7905 /*
7776 delay between */ 7906 * Toggle Transmitter: Power down and then up with 600ms delay
7907 * between
7908 */
7777 msleep(600); 7909 msleep(600);
7778 7910
7779 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ 7911 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7781,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7781 /* Phase2 of POWER_DOWN_RESET */ 7913 /* Phase2 of POWER_DOWN_RESET */
7782 /* Release bit 10 (Release Tx power down) */ 7914 /* Release bit 10 (Release Tx power down) */
7783 bnx2x_cl45_read(bp, phy_blk[port], 7915 bnx2x_cl45_read(bp, phy_blk[port],
7784 MDIO_PMA_DEVAD, 7916 MDIO_PMA_DEVAD,
7785 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7917 MDIO_PMA_REG_TX_POWER_DOWN, &val);
7786 7918
7787 bnx2x_cl45_write(bp, phy_blk[port], 7919 bnx2x_cl45_write(bp, phy_blk[port],
7788 MDIO_PMA_DEVAD, 7920 MDIO_PMA_DEVAD,
7789 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 7921 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
7790 msleep(15); 7922 msleep(15);
7791 7923
7792 /* Read modify write the SPI-ROM version select register */ 7924 /* Read modify write the SPI-ROM version select register */
7793 bnx2x_cl45_read(bp, phy_blk[port], 7925 bnx2x_cl45_read(bp, phy_blk[port],
7794 MDIO_PMA_DEVAD, 7926 MDIO_PMA_DEVAD,
7795 MDIO_PMA_REG_EDC_FFE_MAIN, &val); 7927 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
7796 bnx2x_cl45_write(bp, phy_blk[port], 7928 bnx2x_cl45_write(bp, phy_blk[port],
7797 MDIO_PMA_DEVAD, 7929 MDIO_PMA_DEVAD,
7798 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); 7930 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
7799 7931
7800 /* set GPIO2 back to LOW */ 7932 /* set GPIO2 back to LOW */
7801 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7933 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7802 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 7934 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
7803 } 7935 }
7804 return 0; 7936 return 0;
7805} 7937}
@@ -7846,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7846 7978
7847 /* Set fault module detected LED on */ 7979 /* Set fault module detected LED on */
7848 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 7980 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
7849 MISC_REGISTERS_GPIO_HIGH, 7981 MISC_REGISTERS_GPIO_HIGH,
7850 port); 7982 port);
7851 } 7983 }
7852 7984
7853 return 0; 7985 return 0;
7854} 7986}
7987static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
7988 u8 *io_gpio, u8 *io_port)
7989{
7990
7991 u32 phy_gpio_reset = REG_RD(bp, shmem_base +
7992 offsetof(struct shmem_region,
7993 dev_info.port_hw_config[PORT_0].default_cfg));
7994 switch (phy_gpio_reset) {
7995 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
7996 *io_gpio = 0;
7997 *io_port = 0;
7998 break;
7999 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
8000 *io_gpio = 1;
8001 *io_port = 0;
8002 break;
8003 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
8004 *io_gpio = 2;
8005 *io_port = 0;
8006 break;
8007 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
8008 *io_gpio = 3;
8009 *io_port = 0;
8010 break;
8011 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
8012 *io_gpio = 0;
8013 *io_port = 1;
8014 break;
8015 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
8016 *io_gpio = 1;
8017 *io_port = 1;
8018 break;
8019 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
8020 *io_gpio = 2;
8021 *io_port = 1;
8022 break;
8023 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
8024 *io_gpio = 3;
8025 *io_port = 1;
8026 break;
8027 default:
8028 /* Don't override the io_gpio and io_port */
8029 break;
8030 }
8031}
7855static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, 8032static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7856 u32 shmem_base_path[], 8033 u32 shmem_base_path[],
7857 u32 shmem2_base_path[], u8 phy_index, 8034 u32 shmem2_base_path[], u8 phy_index,
7858 u32 chip_id) 8035 u32 chip_id)
7859{ 8036{
7860 s8 port; 8037 s8 port, reset_gpio;
7861 u32 swap_val, swap_override; 8038 u32 swap_val, swap_override;
7862 struct bnx2x_phy phy[PORT_MAX]; 8039 struct bnx2x_phy phy[PORT_MAX];
7863 struct bnx2x_phy *phy_blk[PORT_MAX]; 8040 struct bnx2x_phy *phy_blk[PORT_MAX];
7864 s8 port_of_path; 8041 s8 port_of_path;
7865 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 8042 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7866 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 8043 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7867 8044
8045 reset_gpio = MISC_REGISTERS_GPIO_1;
7868 port = 1; 8046 port = 1;
7869 8047
7870 bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override)); 8048 /*
8049 * Retrieve the reset gpio/port which control the reset.
8050 * Default is GPIO1, PORT1
8051 */
8052 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
8053 (u8 *)&reset_gpio, (u8 *)&port);
7871 8054
7872 /* Calculate the port based on port swap */ 8055 /* Calculate the port based on port swap */
7873 port ^= (swap_val && swap_override); 8056 port ^= (swap_val && swap_override);
7874 8057
8058 /* Initiate PHY reset*/
8059 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
8060 port);
8061 msleep(1);
8062 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
8063 port);
8064
7875 msleep(5); 8065 msleep(5);
7876 8066
7877 /* PART1 - Reset both phys */ 8067 /* PART1 - Reset both phys */
@@ -7907,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7907 8097
7908 /* Reset the phy */ 8098 /* Reset the phy */
7909 bnx2x_cl45_write(bp, &phy[port], 8099 bnx2x_cl45_write(bp, &phy[port],
7910 MDIO_PMA_DEVAD, 8100 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
7911 MDIO_PMA_REG_CTRL,
7912 1<<15);
7913 } 8101 }
7914 8102
7915 /* Add delay of 150ms after reset */ 8103 /* Add delay of 150ms after reset */
@@ -7923,7 +8111,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7923 } 8111 }
7924 /* PART2 - Download firmware to both phys */ 8112 /* PART2 - Download firmware to both phys */
7925 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 8113 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7926 if (CHIP_IS_E2(bp)) 8114 if (CHIP_IS_E2(bp))
7927 port_of_path = 0; 8115 port_of_path = 0;
7928 else 8116 else
7929 port_of_path = port; 8117 port_of_path = port;
@@ -7958,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7958 break; 8146 break;
7959 8147
7960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 8148 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7961 /* GPIO1 affects both ports, so there's need to pull 8149 /*
7962 it for single port alone */ 8150 * GPIO1 affects both ports, so there's need to pull
8151 * it for single port alone
8152 */
7963 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, 8153 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
7964 shmem2_base_path, 8154 shmem2_base_path,
7965 phy_index, chip_id); 8155 phy_index, chip_id);
@@ -7969,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7969 break; 8159 break;
7970 default: 8160 default:
7971 DP(NETIF_MSG_LINK, 8161 DP(NETIF_MSG_LINK,
7972 "bnx2x_common_init_phy: ext_phy 0x%x not required\n", 8162 "ext_phy 0x%x common init not required\n",
7973 ext_phy_type); 8163 ext_phy_type);
7974 break; 8164 break;
7975 } 8165 }
7976 8166
8167 if (rc != 0)
8168 netdev_err(bp->dev, "Warning: PHY was not initialized,"
8169 " Port %d\n",
8170 0);
7977 return rc; 8171 return rc;
7978} 8172}
7979 8173
@@ -7986,9 +8180,6 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7986 u32 ext_phy_type, ext_phy_config; 8180 u32 ext_phy_type, ext_phy_config;
7987 DP(NETIF_MSG_LINK, "Begin common phy init\n"); 8181 DP(NETIF_MSG_LINK, "Begin common phy init\n");
7988 8182
7989 if (CHIP_REV_IS_EMUL(bp))
7990 return 0;
7991
7992 /* Check if common init was already done */ 8183 /* Check if common init was already done */
7993 phy_ver = REG_RD(bp, shmem_base_path[0] + 8184 phy_ver = REG_RD(bp, shmem_base_path[0] +
7994 offsetof(struct shmem_region, 8185 offsetof(struct shmem_region,
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index bedab1a942c4..92f36b6950dc 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
1/* Copyright 2008-2010 Broadcom Corporation 1/* Copyright 2008-2011 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
33#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH 33#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
34#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE 34#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
35 35
36#define SPEED_AUTO_NEG 0 36#define SPEED_AUTO_NEG 0
37#define SPEED_12000 12000 37#define SPEED_12000 12000
38#define SPEED_12500 12500 38#define SPEED_12500 12500
39#define SPEED_13000 13000 39#define SPEED_13000 13000
@@ -44,8 +44,8 @@
44#define SFP_EEPROM_VENDOR_NAME_SIZE 16 44#define SFP_EEPROM_VENDOR_NAME_SIZE 16
45#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 45#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
46#define SFP_EEPROM_VENDOR_OUI_SIZE 3 46#define SFP_EEPROM_VENDOR_OUI_SIZE 3
47#define SFP_EEPROM_PART_NO_ADDR 0x28 47#define SFP_EEPROM_PART_NO_ADDR 0x28
48#define SFP_EEPROM_PART_NO_SIZE 16 48#define SFP_EEPROM_PART_NO_SIZE 16
49#define PWR_FLT_ERR_MSG_LEN 250 49#define PWR_FLT_ERR_MSG_LEN 250
50 50
51#define XGXS_EXT_PHY_TYPE(ext_phy_config) \ 51#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
62#define SINGLE_MEDIA(params) (params->num_phys == 2) 62#define SINGLE_MEDIA(params) (params->num_phys == 2)
63/* Dual Media board contains two external phy with different media */ 63/* Dual Media board contains two external phy with different media */
64#define DUAL_MEDIA(params) (params->num_phys == 3) 64#define DUAL_MEDIA(params) (params->num_phys == 3)
65#define FW_PARAM_MDIO_CTRL_OFFSET 16 65#define FW_PARAM_MDIO_CTRL_OFFSET 16
66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ 66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) 67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
68 68
@@ -201,12 +201,14 @@ struct link_params {
201 201
202 /* Default / User Configuration */ 202 /* Default / User Configuration */
203 u8 loopback_mode; 203 u8 loopback_mode;
204#define LOOPBACK_NONE 0 204#define LOOPBACK_NONE 0
205#define LOOPBACK_EMAC 1 205#define LOOPBACK_EMAC 1
206#define LOOPBACK_BMAC 2 206#define LOOPBACK_BMAC 2
207#define LOOPBACK_XGXS 3 207#define LOOPBACK_XGXS 3
208#define LOOPBACK_EXT_PHY 4 208#define LOOPBACK_EXT_PHY 4
209#define LOOPBACK_EXT 5 209#define LOOPBACK_EXT 5
210#define LOOPBACK_UMAC 6
211#define LOOPBACK_XMAC 7
210 212
211 /* Device parameters */ 213 /* Device parameters */
212 u8 mac_addr[6]; 214 u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
230 /* Phy register parameter */ 232 /* Phy register parameter */
231 u32 chip_id; 233 u32 chip_id;
232 234
235 /* features */
233 u32 feature_config_flags; 236 u32 feature_config_flags;
234#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) 237#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
235#define FEATURE_CONFIG_PFC_ENABLED (1<<1) 238#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
236#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 239#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
237#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) 240#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
238 /* Will be populated during common init */ 241 /* Will be populated during common init */
239 struct bnx2x_phy phy[MAX_PHYS]; 242 struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
334/* Reset the external of SFX7101 */ 337/* Reset the external of SFX7101 */
335void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); 338void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
336 339
340/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
341u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
342 struct link_params *params, u16 addr,
343 u8 byte_cnt, u8 *o_buf);
344
337void bnx2x_hw_reset_phy(struct link_params *params); 345void bnx2x_hw_reset_phy(struct link_params *params);
338 346
339/* Checks if HW lock is required for this phy/board type */ 347/* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
379 387
380/* Used to configure the ETS to BW limited */ 388/* Used to configure the ETS to BW limited */
381void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, 389void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
382 const u32 cos1_bw); 390 const u32 cos1_bw);
383 391
384/* Used to configure the ETS to strict */ 392/* Used to configure the ETS to strict */
385u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); 393u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index aa032339e321..9d48659e3b28 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
586 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 586 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
587 587
588 /* lock the dmae channel */ 588 /* lock the dmae channel */
589 mutex_lock(&bp->dmae_mutex); 589 spin_lock_bh(&bp->dmae_lock);
590 590
591 /* reset completion */ 591 /* reset completion */
592 *wb_comp = 0; 592 *wb_comp = 0;
@@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
617 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 617 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
618 618
619unlock: 619unlock:
620 mutex_unlock(&bp->dmae_mutex); 620 spin_unlock_bh(&bp->dmae_lock);
621 return rc; 621 return rc;
622} 622}
623 623
@@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1397 } 1397 }
1398 1398
1399 smp_mb__before_atomic_inc(); 1399 smp_mb__before_atomic_inc();
1400 atomic_inc(&bp->spq_left); 1400 atomic_inc(&bp->cq_spq_left);
1401 /* push the change in fp->state and towards the memory */ 1401 /* push the change in fp->state and towards the memory */
1402 smp_wmb(); 1402 smp_wmb();
1403 1403
@@ -2484,8 +2484,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2484 rxq_init->sge_map = fp->rx_sge_mapping; 2484 rxq_init->sge_map = fp->rx_sge_mapping;
2485 rxq_init->rcq_map = fp->rx_comp_mapping; 2485 rxq_init->rcq_map = fp->rx_comp_mapping;
2486 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 2486 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2487 rxq_init->mtu = bp->dev->mtu; 2487
2488 rxq_init->buf_sz = bp->rx_buf_size; 2488 /* Always use mini-jumbo MTU for FCoE L2 ring */
2489 if (IS_FCOE_FP(fp))
2490 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2491 else
2492 rxq_init->mtu = bp->dev->mtu;
2493
2494 rxq_init->buf_sz = fp->rx_buf_size;
2489 rxq_init->cl_qzone_id = fp->cl_qzone_id; 2495 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2490 rxq_init->cl_id = fp->cl_id; 2496 rxq_init->cl_id = fp->cl_id;
2491 rxq_init->spcl_id = fp->cl_id; 2497 rxq_init->spcl_id = fp->cl_id;
@@ -2737,11 +2743,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2737 2743
2738 spin_lock_bh(&bp->spq_lock); 2744 spin_lock_bh(&bp->spq_lock);
2739 2745
2740 if (!atomic_read(&bp->spq_left)) { 2746 if (common) {
2741 BNX2X_ERR("BUG! SPQ ring full!\n"); 2747 if (!atomic_read(&bp->eq_spq_left)) {
2742 spin_unlock_bh(&bp->spq_lock); 2748 BNX2X_ERR("BUG! EQ ring full!\n");
2743 bnx2x_panic(); 2749 spin_unlock_bh(&bp->spq_lock);
2744 return -EBUSY; 2750 bnx2x_panic();
2751 return -EBUSY;
2752 }
2753 } else if (!atomic_read(&bp->cq_spq_left)) {
2754 BNX2X_ERR("BUG! SPQ ring full!\n");
2755 spin_unlock_bh(&bp->spq_lock);
2756 bnx2x_panic();
2757 return -EBUSY;
2745 } 2758 }
2746 2759
2747 spe = bnx2x_sp_get_next(bp); 2760 spe = bnx2x_sp_get_next(bp);
@@ -2772,20 +2785,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2772 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); 2785 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2773 2786
2774 /* stats ramrod has it's own slot on the spq */ 2787 /* stats ramrod has it's own slot on the spq */
2775 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) 2788 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
2776 /* It's ok if the actual decrement is issued towards the memory 2789 /* It's ok if the actual decrement is issued towards the memory
2777 * somewhere between the spin_lock and spin_unlock. Thus no 2790 * somewhere between the spin_lock and spin_unlock. Thus no
2778 * more explict memory barrier is needed. 2791 * more explict memory barrier is needed.
2779 */ 2792 */
2780 atomic_dec(&bp->spq_left); 2793 if (common)
2794 atomic_dec(&bp->eq_spq_left);
2795 else
2796 atomic_dec(&bp->cq_spq_left);
2797 }
2798
2781 2799
2782 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2800 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2783 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) " 2801 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2784 "type(0x%x) left %x\n", 2802 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
2785 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 2803 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2786 (u32)(U64_LO(bp->spq_mapping) + 2804 (u32)(U64_LO(bp->spq_mapping) +
2787 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 2805 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2788 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left)); 2806 HW_CID(bp, cid), data_hi, data_lo, type,
2807 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
2789 2808
2790 bnx2x_sp_prod_update(bp); 2809 bnx2x_sp_prod_update(bp);
2791 spin_unlock_bh(&bp->spq_lock); 2810 spin_unlock_bh(&bp->spq_lock);
@@ -3697,8 +3716,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3697 sw_cons = bp->eq_cons; 3716 sw_cons = bp->eq_cons;
3698 sw_prod = bp->eq_prod; 3717 sw_prod = bp->eq_prod;
3699 3718
3700 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n", 3719 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
3701 hw_cons, sw_cons, atomic_read(&bp->spq_left)); 3720 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3702 3721
3703 for (; sw_cons != hw_cons; 3722 for (; sw_cons != hw_cons;
3704 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 3723 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3763,13 +3782,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3763 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 3782 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3764 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 3783 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3765 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 3784 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3766 bp->set_mac_pending = 0; 3785 if (elem->message.data.set_mac_event.echo)
3786 bp->set_mac_pending = 0;
3767 break; 3787 break;
3768 3788
3769 case (EVENT_RING_OPCODE_SET_MAC | 3789 case (EVENT_RING_OPCODE_SET_MAC |
3770 BNX2X_STATE_CLOSING_WAIT4_HALT): 3790 BNX2X_STATE_CLOSING_WAIT4_HALT):
3771 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); 3791 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3772 bp->set_mac_pending = 0; 3792 if (elem->message.data.set_mac_event.echo)
3793 bp->set_mac_pending = 0;
3773 break; 3794 break;
3774 default: 3795 default:
3775 /* unknown event log error and continue */ 3796 /* unknown event log error and continue */
@@ -3781,7 +3802,7 @@ next_spqe:
3781 } /* for */ 3802 } /* for */
3782 3803
3783 smp_mb__before_atomic_inc(); 3804 smp_mb__before_atomic_inc();
3784 atomic_add(spqe_cnt, &bp->spq_left); 3805 atomic_add(spqe_cnt, &bp->eq_spq_left);
3785 3806
3786 bp->eq_cons = sw_cons; 3807 bp->eq_cons = sw_cons;
3787 bp->eq_prod = sw_prod; 3808 bp->eq_prod = sw_prod;
@@ -4208,13 +4229,13 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
4208 4229
4209 for_each_eth_queue(bp, i) 4230 for_each_eth_queue(bp, i)
4210 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, 4231 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4211 bp->rx_ticks, bp->tx_ticks); 4232 bp->tx_ticks, bp->rx_ticks);
4212} 4233}
4213 4234
4214static void bnx2x_init_sp_ring(struct bnx2x *bp) 4235static void bnx2x_init_sp_ring(struct bnx2x *bp)
4215{ 4236{
4216 spin_lock_init(&bp->spq_lock); 4237 spin_lock_init(&bp->spq_lock);
4217 atomic_set(&bp->spq_left, MAX_SPQ_PENDING); 4238 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
4218 4239
4219 bp->spq_prod_idx = 0; 4240 bp->spq_prod_idx = 0;
4220 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 4241 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4239,9 +4260,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
4239 bp->eq_cons = 0; 4260 bp->eq_cons = 0;
4240 bp->eq_prod = NUM_EQ_DESC; 4261 bp->eq_prod = NUM_EQ_DESC;
4241 bp->eq_cons_sb = BNX2X_EQ_INDEX; 4262 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4263 /* we want a warning message before it gets rought... */
4264 atomic_set(&bp->eq_spq_left,
4265 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4242} 4266}
4243 4267
4244static void bnx2x_init_ind_table(struct bnx2x *bp) 4268void bnx2x_push_indir_table(struct bnx2x *bp)
4245{ 4269{
4246 int func = BP_FUNC(bp); 4270 int func = BP_FUNC(bp);
4247 int i; 4271 int i;
@@ -4249,13 +4273,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4249 if (bp->multi_mode == ETH_RSS_MODE_DISABLED) 4273 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4250 return; 4274 return;
4251 4275
4252 DP(NETIF_MSG_IFUP,
4253 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4254 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 4276 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4255 REG_WR8(bp, BAR_TSTRORM_INTMEM + 4277 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4256 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, 4278 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4257 bp->fp->cl_id + (i % (bp->num_queues - 4279 bp->fp->cl_id + bp->rx_indir_table[i]);
4258 NONE_ETH_CONTEXT_USE))); 4280}
4281
4282static void bnx2x_init_ind_table(struct bnx2x *bp)
4283{
4284 int i;
4285
4286 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4287 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4288
4289 bnx2x_push_indir_table(bp);
4259} 4290}
4260 4291
4261void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4292void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
@@ -5851,7 +5882,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5851 BP_ABS_FUNC(bp), load_code); 5882 BP_ABS_FUNC(bp), load_code);
5852 5883
5853 bp->dmae_ready = 0; 5884 bp->dmae_ready = 0;
5854 mutex_init(&bp->dmae_mutex); 5885 spin_lock_init(&bp->dmae_lock);
5855 rc = bnx2x_gunzip_init(bp); 5886 rc = bnx2x_gunzip_init(bp);
5856 if (rc) 5887 if (rc)
5857 return rc; 5888 return rc;
@@ -6003,6 +6034,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
6003 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 6034 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6004 BCM_PAGE_SIZE * NUM_EQ_PAGES); 6035 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6005 6036
6037 BNX2X_FREE(bp->rx_indir_table);
6038
6006#undef BNX2X_PCI_FREE 6039#undef BNX2X_PCI_FREE
6007#undef BNX2X_KFREE 6040#undef BNX2X_KFREE
6008} 6041}
@@ -6133,6 +6166,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
6133 /* EQ */ 6166 /* EQ */
6134 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 6167 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6135 BCM_PAGE_SIZE * NUM_EQ_PAGES); 6168 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6169
6170 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
6171 TSTORM_INDIRECTION_TABLE_SIZE);
6136 return 0; 6172 return 0;
6137 6173
6138alloc_mem_err: 6174alloc_mem_err:
@@ -6186,12 +6222,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6186 int ramrod_flags = WAIT_RAMROD_COMMON; 6222 int ramrod_flags = WAIT_RAMROD_COMMON;
6187 6223
6188 bp->set_mac_pending = 1; 6224 bp->set_mac_pending = 1;
6189 smp_wmb();
6190 6225
6191 config->hdr.length = 1; 6226 config->hdr.length = 1;
6192 config->hdr.offset = cam_offset; 6227 config->hdr.offset = cam_offset;
6193 config->hdr.client_id = 0xff; 6228 config->hdr.client_id = 0xff;
6194 config->hdr.reserved1 = 0; 6229 /* Mark the single MAC configuration ramrod as opposed to a
6230 * UC/MC list configuration).
6231 */
6232 config->hdr.echo = 1;
6195 6233
6196 /* primary MAC */ 6234 /* primary MAC */
6197 config->config_table[0].msb_mac_addr = 6235 config->config_table[0].msb_mac_addr =
@@ -6223,6 +6261,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6223 config->config_table[0].middle_mac_addr, 6261 config->config_table[0].middle_mac_addr,
6224 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); 6262 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6225 6263
6264 mb();
6265
6226 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6266 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6227 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 6267 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6228 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); 6268 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6287,20 +6327,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6287 if (CHIP_IS_E1H(bp)) 6327 if (CHIP_IS_E1H(bp))
6288 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); 6328 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6289 else if (CHIP_MODE_IS_4_PORT(bp)) 6329 else if (CHIP_MODE_IS_4_PORT(bp))
6290 return BP_FUNC(bp) * 32 + rel_offset; 6330 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
6291 else 6331 else
6292 return BP_VN(bp) * 32 + rel_offset; 6332 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
6293} 6333}
6294 6334
6295/** 6335/**
6296 * LLH CAM line allocations: currently only iSCSI and ETH macs are 6336 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6297 * relevant. In addition, current implementation is tuned for a 6337 * relevant. In addition, current implementation is tuned for a
6298 * single ETH MAC. 6338 * single ETH MAC.
6299 *
6300 * When multiple unicast ETH MACs PF configuration in switch
6301 * independent mode is required (NetQ, multiple netdev MACs,
6302 * etc.), consider better utilisation of 16 per function MAC
6303 * entries in the LLH memory.
6304 */ 6339 */
6305enum { 6340enum {
6306 LLH_CAM_ISCSI_ETH_LINE = 0, 6341 LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6375,14 +6410,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6375 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); 6410 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6376 } 6411 }
6377} 6412}
6378static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset) 6413
6414static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6415{
6416 return CHIP_REV_IS_SLOW(bp) ?
6417 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6418 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6419}
6420
6421/* set mc list, do not wait as wait implies sleep and
6422 * set_rx_mode can be invoked from non-sleepable context.
6423 *
6424 * Instead we use the same ramrod data buffer each time we need
6425 * to configure a list of addresses, and use the fact that the
6426 * list of MACs is changed in an incremental way and that the
6427 * function is called under the netif_addr_lock. A temporary
6428 * inconsistent CAM configuration (possible in case of a very fast
6429 * sequence of add/del/add on the host side) will shortly be
6430 * restored by the handler of the last ramrod.
6431 */
6432static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
6379{ 6433{
6380 int i = 0, old; 6434 int i = 0, old;
6381 struct net_device *dev = bp->dev; 6435 struct net_device *dev = bp->dev;
6436 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6382 struct netdev_hw_addr *ha; 6437 struct netdev_hw_addr *ha;
6383 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); 6438 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6384 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); 6439 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6385 6440
6441 if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6442 return -EINVAL;
6443
6386 netdev_for_each_mc_addr(ha, dev) { 6444 netdev_for_each_mc_addr(ha, dev) {
6387 /* copy mac */ 6445 /* copy mac */
6388 config_cmd->config_table[i].msb_mac_addr = 6446 config_cmd->config_table[i].msb_mac_addr =
@@ -6423,32 +6481,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6423 } 6481 }
6424 } 6482 }
6425 6483
6484 wmb();
6485
6426 config_cmd->hdr.length = i; 6486 config_cmd->hdr.length = i;
6427 config_cmd->hdr.offset = offset; 6487 config_cmd->hdr.offset = offset;
6428 config_cmd->hdr.client_id = 0xff; 6488 config_cmd->hdr.client_id = 0xff;
6429 config_cmd->hdr.reserved1 = 0; 6489 /* Mark that this ramrod doesn't use bp->set_mac_pending for
6490 * synchronization.
6491 */
6492 config_cmd->hdr.echo = 0;
6430 6493
6431 bp->set_mac_pending = 1; 6494 mb();
6432 smp_wmb();
6433 6495
6434 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6496 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6435 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); 6497 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6436} 6498}
6437static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp) 6499
6500void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6438{ 6501{
6439 int i; 6502 int i;
6440 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); 6503 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6441 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); 6504 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6442 int ramrod_flags = WAIT_RAMROD_COMMON; 6505 int ramrod_flags = WAIT_RAMROD_COMMON;
6506 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6443 6507
6444 bp->set_mac_pending = 1; 6508 for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
6445 smp_wmb();
6446
6447 for (i = 0; i < config_cmd->hdr.length; i++)
6448 SET_FLAG(config_cmd->config_table[i].flags, 6509 SET_FLAG(config_cmd->config_table[i].flags,
6449 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 6510 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6450 T_ETH_MAC_COMMAND_INVALIDATE); 6511 T_ETH_MAC_COMMAND_INVALIDATE);
6451 6512
6513 wmb();
6514
6515 config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6516 config_cmd->hdr.offset = offset;
6517 config_cmd->hdr.client_id = 0xff;
6518 /* We'll wait for a completion this time... */
6519 config_cmd->hdr.echo = 1;
6520
6521 bp->set_mac_pending = 1;
6522
6523 mb();
6524
6452 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6525 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6453 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); 6526 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6454 6527
@@ -6458,6 +6531,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6458 6531
6459} 6532}
6460 6533
6534/* Accept one or more multicasts */
6535static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6536{
6537 struct net_device *dev = bp->dev;
6538 struct netdev_hw_addr *ha;
6539 u32 mc_filter[MC_HASH_SIZE];
6540 u32 crc, bit, regidx;
6541 int i;
6542
6543 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6544
6545 netdev_for_each_mc_addr(ha, dev) {
6546 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6547 bnx2x_mc_addr(ha));
6548
6549 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6550 ETH_ALEN);
6551 bit = (crc >> 24) & 0xff;
6552 regidx = bit >> 5;
6553 bit &= 0x1f;
6554 mc_filter[regidx] |= (1 << bit);
6555 }
6556
6557 for (i = 0; i < MC_HASH_SIZE; i++)
6558 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6559 mc_filter[i]);
6560
6561 return 0;
6562}
6563
6564void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6565{
6566 int i;
6567
6568 for (i = 0; i < MC_HASH_SIZE; i++)
6569 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6570}
6571
6461#ifdef BCM_CNIC 6572#ifdef BCM_CNIC
6462/** 6573/**
6463 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH 6574 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
@@ -6476,12 +6587,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6476 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + 6587 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6477 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; 6588 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6478 u32 cl_bit_vec = (1 << iscsi_l2_cl_id); 6589 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6590 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
6479 6591
6480 /* Send a SET_MAC ramrod */ 6592 /* Send a SET_MAC ramrod */
6481 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, 6593 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
6482 cam_offset, 0); 6594 cam_offset, 0);
6483 6595
6484 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); 6596 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6485 6597
6486 return 0; 6598 return 0;
6487} 6599}
@@ -7123,20 +7235,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7123 /* Give HW time to discard old tx messages */ 7235 /* Give HW time to discard old tx messages */
7124 msleep(1); 7236 msleep(1);
7125 7237
7126 if (CHIP_IS_E1(bp)) { 7238 bnx2x_set_eth_mac(bp, 0);
7127 /* invalidate mc list,
7128 * wait and poll (interrupts are off)
7129 */
7130 bnx2x_invlidate_e1_mc_list(bp);
7131 bnx2x_set_eth_mac(bp, 0);
7132
7133 } else {
7134 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7135 7239
7136 bnx2x_set_eth_mac(bp, 0); 7240 bnx2x_invalidate_uc_list(bp);
7137 7241
7138 for (i = 0; i < MC_HASH_SIZE; i++) 7242 if (CHIP_IS_E1(bp))
7139 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 7243 bnx2x_invalidate_e1_mc_list(bp);
7244 else {
7245 bnx2x_invalidate_e1h_mc_list(bp);
7246 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7140 } 7247 }
7141 7248
7142#ifdef BCM_CNIC 7249#ifdef BCM_CNIC
@@ -8405,11 +8512,47 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8405 bp->common.shmem2_base); 8512 bp->common.shmem2_base);
8406} 8513}
8407 8514
8515#ifdef BCM_CNIC
8516static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8517{
8518 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8519 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8520 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8521 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8522
8523 /* Get the number of maximum allowed iSCSI and FCoE connections */
8524 bp->cnic_eth_dev.max_iscsi_conn =
8525 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8526 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8527
8528 bp->cnic_eth_dev.max_fcoe_conn =
8529 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8530 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8531
8532 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8533 bp->cnic_eth_dev.max_iscsi_conn,
8534 bp->cnic_eth_dev.max_fcoe_conn);
8535
8536 /* If mamimum allowed number of connections is zero -
8537 * disable the feature.
8538 */
8539 if (!bp->cnic_eth_dev.max_iscsi_conn)
8540 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8541
8542 if (!bp->cnic_eth_dev.max_fcoe_conn)
8543 bp->flags |= NO_FCOE_FLAG;
8544}
8545#endif
8546
8408static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 8547static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8409{ 8548{
8410 u32 val, val2; 8549 u32 val, val2;
8411 int func = BP_ABS_FUNC(bp); 8550 int func = BP_ABS_FUNC(bp);
8412 int port = BP_PORT(bp); 8551 int port = BP_PORT(bp);
8552#ifdef BCM_CNIC
8553 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8554 u8 *fip_mac = bp->fip_mac;
8555#endif
8413 8556
8414 if (BP_NOMCP(bp)) { 8557 if (BP_NOMCP(bp)) {
8415 BNX2X_ERROR("warning: random MAC workaround active\n"); 8558 BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8422,7 +8565,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8422 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 8565 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8423 8566
8424#ifdef BCM_CNIC 8567#ifdef BCM_CNIC
8425 /* iSCSI NPAR MAC */ 8568 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
8569 * FCoE MAC then the appropriate feature should be disabled.
8570 */
8426 if (IS_MF_SI(bp)) { 8571 if (IS_MF_SI(bp)) {
8427 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 8572 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8428 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 8573 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8430,8 +8575,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8430 iscsi_mac_addr_upper); 8575 iscsi_mac_addr_upper);
8431 val = MF_CFG_RD(bp, func_ext_config[func]. 8576 val = MF_CFG_RD(bp, func_ext_config[func].
8432 iscsi_mac_addr_lower); 8577 iscsi_mac_addr_lower);
8433 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8578 BNX2X_DEV_INFO("Read iSCSI MAC: "
8434 } 8579 "0x%x:0x%04x\n", val2, val);
8580 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8581
8582 /* Disable iSCSI OOO if MAC configuration is
8583 * invalid.
8584 */
8585 if (!is_valid_ether_addr(iscsi_mac)) {
8586 bp->flags |= NO_ISCSI_OOO_FLAG |
8587 NO_ISCSI_FLAG;
8588 memset(iscsi_mac, 0, ETH_ALEN);
8589 }
8590 } else
8591 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8592
8593 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8594 val2 = MF_CFG_RD(bp, func_ext_config[func].
8595 fcoe_mac_addr_upper);
8596 val = MF_CFG_RD(bp, func_ext_config[func].
8597 fcoe_mac_addr_lower);
8598 BNX2X_DEV_INFO("Read FCoE MAC to "
8599 "0x%x:0x%04x\n", val2, val);
8600 bnx2x_set_mac_buf(fip_mac, val, val2);
8601
8602 /* Disable FCoE if MAC configuration is
8603 * invalid.
8604 */
8605 if (!is_valid_ether_addr(fip_mac)) {
8606 bp->flags |= NO_FCOE_FLAG;
8607 memset(bp->fip_mac, 0, ETH_ALEN);
8608 }
8609 } else
8610 bp->flags |= NO_FCOE_FLAG;
8435 } 8611 }
8436#endif 8612#endif
8437 } else { 8613 } else {
@@ -8445,7 +8621,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8445 iscsi_mac_upper); 8621 iscsi_mac_upper);
8446 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 8622 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8447 iscsi_mac_lower); 8623 iscsi_mac_lower);
8448 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8624 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8449#endif 8625#endif
8450 } 8626 }
8451 8627
@@ -8453,14 +8629,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8453 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8629 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8454 8630
8455#ifdef BCM_CNIC 8631#ifdef BCM_CNIC
8456 /* Inform the upper layers about FCoE MAC */ 8632 /* Set the FCoE MAC in modes other then MF_SI */
8457 if (!CHIP_IS_E1x(bp)) { 8633 if (!CHIP_IS_E1x(bp)) {
8458 if (IS_MF_SD(bp)) 8634 if (IS_MF_SD(bp))
8459 memcpy(bp->fip_mac, bp->dev->dev_addr, 8635 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8460 sizeof(bp->fip_mac)); 8636 else if (!IS_MF(bp))
8461 else 8637 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
8462 memcpy(bp->fip_mac, bp->iscsi_mac,
8463 sizeof(bp->fip_mac));
8464 } 8638 }
8465#endif 8639#endif
8466} 8640}
@@ -8623,6 +8797,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8623 /* Get MAC addresses */ 8797 /* Get MAC addresses */
8624 bnx2x_get_mac_hwinfo(bp); 8798 bnx2x_get_mac_hwinfo(bp);
8625 8799
8800#ifdef BCM_CNIC
8801 bnx2x_get_cnic_info(bp);
8802#endif
8803
8626 return rc; 8804 return rc;
8627} 8805}
8628 8806
@@ -8837,12 +9015,197 @@ static int bnx2x_close(struct net_device *dev)
8837 return 0; 9015 return 0;
8838} 9016}
8839 9017
9018#define E1_MAX_UC_LIST 29
9019#define E1H_MAX_UC_LIST 30
9020#define E2_MAX_UC_LIST 14
9021static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
9022{
9023 if (CHIP_IS_E1(bp))
9024 return E1_MAX_UC_LIST;
9025 else if (CHIP_IS_E1H(bp))
9026 return E1H_MAX_UC_LIST;
9027 else
9028 return E2_MAX_UC_LIST;
9029}
9030
9031
9032static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
9033{
9034 if (CHIP_IS_E1(bp))
9035 /* CAM Entries for Port0:
9036 * 0 - prim ETH MAC
9037 * 1 - BCAST MAC
9038 * 2 - iSCSI L2 ring ETH MAC
9039 * 3-31 - UC MACs
9040 *
9041 * Port1 entries are allocated the same way starting from
9042 * entry 32.
9043 */
9044 return 3 + 32 * BP_PORT(bp);
9045 else if (CHIP_IS_E1H(bp)) {
9046 /* CAM Entries:
9047 * 0-7 - prim ETH MAC for each function
9048 * 8-15 - iSCSI L2 ring ETH MAC for each function
9049 * 16 till 255 UC MAC lists for each function
9050 *
9051 * Remark: There is no FCoE support for E1H, thus FCoE related
9052 * MACs are not considered.
9053 */
9054 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
9055 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
9056 } else {
9057 /* CAM Entries (there is a separate CAM per engine):
9058 * 0-4 - prim ETH MAC for each function
9059 * 4-7 - iSCSI L2 ring ETH MAC for each function
9060 * 8-11 - FIP ucast L2 MAC for each function
9061 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
9062 * 16 till 71 UC MAC lists for each function
9063 */
9064 u8 func_idx =
9065 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
9066
9067 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
9068 bnx2x_max_uc_list(bp) * func_idx;
9069 }
9070}
9071
9072/* set uc list, do not wait as wait implies sleep and
9073 * set_rx_mode can be invoked from non-sleepable context.
9074 *
9075 * Instead we use the same ramrod data buffer each time we need
9076 * to configure a list of addresses, and use the fact that the
9077 * list of MACs is changed in an incremental way and that the
9078 * function is called under the netif_addr_lock. A temporary
9079 * inconsistent CAM configuration (possible in case of very fast
9080 * sequence of add/del/add on the host side) will shortly be
9081 * restored by the handler of the last ramrod.
9082 */
9083static int bnx2x_set_uc_list(struct bnx2x *bp)
9084{
9085 int i = 0, old;
9086 struct net_device *dev = bp->dev;
9087 u8 offset = bnx2x_uc_list_cam_offset(bp);
9088 struct netdev_hw_addr *ha;
9089 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9090 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9091
9092 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
9093 return -EINVAL;
9094
9095 netdev_for_each_uc_addr(ha, dev) {
9096 /* copy mac */
9097 config_cmd->config_table[i].msb_mac_addr =
9098 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
9099 config_cmd->config_table[i].middle_mac_addr =
9100 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
9101 config_cmd->config_table[i].lsb_mac_addr =
9102 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
9103
9104 config_cmd->config_table[i].vlan_id = 0;
9105 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
9106 config_cmd->config_table[i].clients_bit_vector =
9107 cpu_to_le32(1 << BP_L_ID(bp));
9108
9109 SET_FLAG(config_cmd->config_table[i].flags,
9110 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9111 T_ETH_MAC_COMMAND_SET);
9112
9113 DP(NETIF_MSG_IFUP,
9114 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
9115 config_cmd->config_table[i].msb_mac_addr,
9116 config_cmd->config_table[i].middle_mac_addr,
9117 config_cmd->config_table[i].lsb_mac_addr);
9118
9119 i++;
9120
9121 /* Set uc MAC in NIG */
9122 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
9123 LLH_CAM_ETH_LINE + i);
9124 }
9125 old = config_cmd->hdr.length;
9126 if (old > i) {
9127 for (; i < old; i++) {
9128 if (CAM_IS_INVALID(config_cmd->
9129 config_table[i])) {
9130 /* already invalidated */
9131 break;
9132 }
9133 /* invalidate */
9134 SET_FLAG(config_cmd->config_table[i].flags,
9135 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9136 T_ETH_MAC_COMMAND_INVALIDATE);
9137 }
9138 }
9139
9140 wmb();
9141
9142 config_cmd->hdr.length = i;
9143 config_cmd->hdr.offset = offset;
9144 config_cmd->hdr.client_id = 0xff;
9145 /* Mark that this ramrod doesn't use bp->set_mac_pending for
9146 * synchronization.
9147 */
9148 config_cmd->hdr.echo = 0;
9149
9150 mb();
9151
9152 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9153 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9154
9155}
9156
9157void bnx2x_invalidate_uc_list(struct bnx2x *bp)
9158{
9159 int i;
9160 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9161 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9162 int ramrod_flags = WAIT_RAMROD_COMMON;
9163 u8 offset = bnx2x_uc_list_cam_offset(bp);
9164 u8 max_list_size = bnx2x_max_uc_list(bp);
9165
9166 for (i = 0; i < max_list_size; i++) {
9167 SET_FLAG(config_cmd->config_table[i].flags,
9168 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9169 T_ETH_MAC_COMMAND_INVALIDATE);
9170 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
9171 }
9172
9173 wmb();
9174
9175 config_cmd->hdr.length = max_list_size;
9176 config_cmd->hdr.offset = offset;
9177 config_cmd->hdr.client_id = 0xff;
9178 /* We'll wait for a completion this time... */
9179 config_cmd->hdr.echo = 1;
9180
9181 bp->set_mac_pending = 1;
9182
9183 mb();
9184
9185 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9186 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9187
9188 /* Wait for a completion */
9189 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
9190 ramrod_flags);
9191
9192}
9193
9194static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9195{
9196 /* some multicasts */
9197 if (CHIP_IS_E1(bp)) {
9198 return bnx2x_set_e1_mc_list(bp);
9199 } else { /* E1H and newer */
9200 return bnx2x_set_e1h_mc_list(bp);
9201 }
9202}
9203
8840/* called with netif_tx_lock from dev_mcast.c */ 9204/* called with netif_tx_lock from dev_mcast.c */
8841void bnx2x_set_rx_mode(struct net_device *dev) 9205void bnx2x_set_rx_mode(struct net_device *dev)
8842{ 9206{
8843 struct bnx2x *bp = netdev_priv(dev); 9207 struct bnx2x *bp = netdev_priv(dev);
8844 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 9208 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8845 int port = BP_PORT(bp);
8846 9209
8847 if (bp->state != BNX2X_STATE_OPEN) { 9210 if (bp->state != BNX2X_STATE_OPEN) {
8848 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 9211 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8853,47 +9216,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
8853 9216
8854 if (dev->flags & IFF_PROMISC) 9217 if (dev->flags & IFF_PROMISC)
8855 rx_mode = BNX2X_RX_MODE_PROMISC; 9218 rx_mode = BNX2X_RX_MODE_PROMISC;
8856 else if ((dev->flags & IFF_ALLMULTI) || 9219 else if (dev->flags & IFF_ALLMULTI)
8857 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8858 CHIP_IS_E1(bp)))
8859 rx_mode = BNX2X_RX_MODE_ALLMULTI; 9220 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8860 else { /* some multicasts */ 9221 else {
8861 if (CHIP_IS_E1(bp)) { 9222 /* some multicasts */
8862 /* 9223 if (bnx2x_set_mc_list(bp))
8863 * set mc list, do not wait as wait implies sleep 9224 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8864 * and set_rx_mode can be invoked from non-sleepable
8865 * context
8866 */
8867 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8868 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8869 BNX2X_MAX_MULTICAST*(1 + port));
8870
8871 bnx2x_set_e1_mc_list(bp, offset);
8872 } else { /* E1H */
8873 /* Accept one or more multicasts */
8874 struct netdev_hw_addr *ha;
8875 u32 mc_filter[MC_HASH_SIZE];
8876 u32 crc, bit, regidx;
8877 int i;
8878
8879 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8880
8881 netdev_for_each_mc_addr(ha, dev) {
8882 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8883 bnx2x_mc_addr(ha));
8884
8885 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8886 ETH_ALEN);
8887 bit = (crc >> 24) & 0xff;
8888 regidx = bit >> 5;
8889 bit &= 0x1f;
8890 mc_filter[regidx] |= (1 << bit);
8891 }
8892 9225
8893 for (i = 0; i < MC_HASH_SIZE; i++) 9226 /* some unicasts */
8894 REG_WR(bp, MC_HASH_OFFSET(bp, i), 9227 if (bnx2x_set_uc_list(bp))
8895 mc_filter[i]); 9228 rx_mode = BNX2X_RX_MODE_PROMISC;
8896 }
8897 } 9229 }
8898 9230
8899 bp->rx_mode = rx_mode; 9231 bp->rx_mode = rx_mode;
@@ -8974,7 +9306,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
8974 .ndo_stop = bnx2x_close, 9306 .ndo_stop = bnx2x_close,
8975 .ndo_start_xmit = bnx2x_start_xmit, 9307 .ndo_start_xmit = bnx2x_start_xmit,
8976 .ndo_select_queue = bnx2x_select_queue, 9308 .ndo_select_queue = bnx2x_select_queue,
8977 .ndo_set_multicast_list = bnx2x_set_rx_mode, 9309 .ndo_set_rx_mode = bnx2x_set_rx_mode,
8978 .ndo_set_mac_address = bnx2x_change_mac_addr, 9310 .ndo_set_mac_address = bnx2x_change_mac_addr,
8979 .ndo_validate_addr = eth_validate_addr, 9311 .ndo_validate_addr = eth_validate_addr,
8980 .ndo_do_ioctl = bnx2x_ioctl, 9312 .ndo_do_ioctl = bnx2x_ioctl,
@@ -9120,7 +9452,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9120 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 9452 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9121 dev->vlan_features |= NETIF_F_TSO6; 9453 dev->vlan_features |= NETIF_F_TSO6;
9122 9454
9123#ifdef BCM_DCB 9455#ifdef BCM_DCBNL
9124 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 9456 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9125#endif 9457#endif
9126 9458
@@ -9527,6 +9859,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9527 } 9859 }
9528#endif 9860#endif
9529 9861
9862#ifdef BCM_DCBNL
9863 /* Delete app tlvs from dcbnl */
9864 bnx2x_dcbnl_update_applist(bp, true);
9865#endif
9866
9530 unregister_netdev(dev); 9867 unregister_netdev(dev);
9531 9868
9532 /* Delete all NAPI objects */ 9869 /* Delete all NAPI objects */
@@ -9800,15 +10137,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9800 HW_CID(bp, BNX2X_ISCSI_ETH_CID)); 10137 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9801 } 10138 }
9802 10139
9803 /* There may be not more than 8 L2 and COMMON SPEs and not more 10140 /* There may be not more than 8 L2 and not more than 8 L5 SPEs
9804 * than 8 L5 SPEs in the air. 10141 * We also check that the number of outstanding
10142 * COMMON ramrods is not more than the EQ and SPQ can
10143 * accommodate.
9805 */ 10144 */
9806 if ((type == NONE_CONNECTION_TYPE) || 10145 if (type == ETH_CONNECTION_TYPE) {
9807 (type == ETH_CONNECTION_TYPE)) { 10146 if (!atomic_read(&bp->cq_spq_left))
9808 if (!atomic_read(&bp->spq_left))
9809 break; 10147 break;
9810 else 10148 else
9811 atomic_dec(&bp->spq_left); 10149 atomic_dec(&bp->cq_spq_left);
10150 } else if (type == NONE_CONNECTION_TYPE) {
10151 if (!atomic_read(&bp->eq_spq_left))
10152 break;
10153 else
10154 atomic_dec(&bp->eq_spq_left);
9812 } else if ((type == ISCSI_CONNECTION_TYPE) || 10155 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9813 (type == FCOE_CONNECTION_TYPE)) { 10156 (type == FCOE_CONNECTION_TYPE)) {
9814 if (bp->cnic_spq_pending >= 10157 if (bp->cnic_spq_pending >=
@@ -9886,7 +10229,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9886 int rc = 0; 10229 int rc = 0;
9887 10230
9888 mutex_lock(&bp->cnic_mutex); 10231 mutex_lock(&bp->cnic_mutex);
9889 c_ops = bp->cnic_ops; 10232 c_ops = rcu_dereference_protected(bp->cnic_ops,
10233 lockdep_is_held(&bp->cnic_mutex));
9890 if (c_ops) 10234 if (c_ops)
9891 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 10235 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9892 mutex_unlock(&bp->cnic_mutex); 10236 mutex_unlock(&bp->cnic_mutex);
@@ -10000,7 +10344,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10000 int count = ctl->data.credit.credit_count; 10344 int count = ctl->data.credit.credit_count;
10001 10345
10002 smp_mb__before_atomic_inc(); 10346 smp_mb__before_atomic_inc();
10003 atomic_add(count, &bp->spq_left); 10347 atomic_add(count, &bp->cq_spq_left);
10004 smp_mb__after_atomic_inc(); 10348 smp_mb__after_atomic_inc();
10005 break; 10349 break;
10006 } 10350 }
@@ -10096,6 +10440,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10096 struct bnx2x *bp = netdev_priv(dev); 10440 struct bnx2x *bp = netdev_priv(dev);
10097 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 10441 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10098 10442
10443 /* If both iSCSI and FCoE are disabled - return NULL in
10444 * order to indicate CNIC that it should not try to work
10445 * with this device.
10446 */
10447 if (NO_ISCSI(bp) && NO_FCOE(bp))
10448 return NULL;
10449
10099 cp->drv_owner = THIS_MODULE; 10450 cp->drv_owner = THIS_MODULE;
10100 cp->chip_id = CHIP_ID(bp); 10451 cp->chip_id = CHIP_ID(bp);
10101 cp->pdev = bp->pdev; 10452 cp->pdev = bp->pdev;
@@ -10116,6 +10467,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10116 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; 10467 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10117 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; 10468 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10118 10469
10470 if (NO_ISCSI_OOO(bp))
10471 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10472
10473 if (NO_ISCSI(bp))
10474 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10475
10476 if (NO_FCOE(bp))
10477 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10478
10119 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " 10479 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10120 "starting cid %d\n", 10480 "starting cid %d\n",
10121 cp->ctx_blk_size, 10481 cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index e01330bb36c7..1c89f19a4425 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6083,6 +6083,7 @@ Theotherbitsarereservedandshouldbezero*/
6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e 6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842 6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842
6086#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
6086 6087
6087#define MDIO_AN_REG_8727_MISC_CTRL 0x8309 6088#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
6088 6089
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 0e2737eac8b7..3c5c014e82b2 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -6,6 +6,9 @@ obj-$(CONFIG_BONDING) += bonding.o
6 6
7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o 7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
8 8
9proc-$(CONFIG_PROC_FS) += bond_procfs.o
10bonding-objs += $(proc-y)
11
9ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o 12ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
10bonding-objs += $(ipv6-y) 13bonding-objs += $(ipv6-y)
11 14
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a5d5d0b5b155..494bf960442d 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -246,7 +246,7 @@ static inline void __enable_port(struct port *port)
246 */ 246 */
247static inline int __port_is_enabled(struct port *port) 247static inline int __port_is_enabled(struct port *port)
248{ 248{
249 return port->slave->state == BOND_STATE_ACTIVE; 249 return bond_is_active_slave(port->slave);
250} 250}
251 251
252/** 252/**
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5c6fba802f2b..9bc5de3e04a8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -604,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
604 604
605 _lock_rx_hashtbl(bond); 605 _lock_rx_hashtbl(bond);
606 606
607 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src)); 607 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
608 client_info = &(bond_info->rx_hashtbl[hash_index]); 608 client_info = &(bond_info->rx_hashtbl[hash_index]);
609 609
610 if (client_info->assigned) { 610 if (client_info->assigned) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 163e0b06eaa5..1a6e9eb7af43 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,15 +59,12 @@
59#include <linux/uaccess.h> 59#include <linux/uaccess.h>
60#include <linux/errno.h> 60#include <linux/errno.h>
61#include <linux/netdevice.h> 61#include <linux/netdevice.h>
62#include <linux/netpoll.h>
63#include <linux/inetdevice.h> 62#include <linux/inetdevice.h>
64#include <linux/igmp.h> 63#include <linux/igmp.h>
65#include <linux/etherdevice.h> 64#include <linux/etherdevice.h>
66#include <linux/skbuff.h> 65#include <linux/skbuff.h>
67#include <net/sock.h> 66#include <net/sock.h>
68#include <linux/rtnetlink.h> 67#include <linux/rtnetlink.h>
69#include <linux/proc_fs.h>
70#include <linux/seq_file.h>
71#include <linux/smp.h> 68#include <linux/smp.h>
72#include <linux/if_ether.h> 69#include <linux/if_ether.h>
73#include <net/arp.h> 70#include <net/arp.h>
@@ -174,9 +171,6 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
174atomic_t netpoll_block_tx = ATOMIC_INIT(0); 171atomic_t netpoll_block_tx = ATOMIC_INIT(0);
175#endif 172#endif
176 173
177static const char * const version =
178 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
179
180int bond_net_id __read_mostly; 174int bond_net_id __read_mostly;
181 175
182static __be32 arp_target[BOND_MAX_ARP_TARGETS]; 176static __be32 arp_target[BOND_MAX_ARP_TARGETS];
@@ -246,7 +240,7 @@ static void bond_uninit(struct net_device *bond_dev);
246 240
247/*---------------------------- General routines -----------------------------*/ 241/*---------------------------- General routines -----------------------------*/
248 242
249static const char *bond_mode_name(int mode) 243const char *bond_mode_name(int mode)
250{ 244{
251 static const char *names[] = { 245 static const char *names[] = {
252 [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", 246 [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
@@ -424,15 +418,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
424{ 418{
425 skb->dev = slave_dev; 419 skb->dev = slave_dev;
426 skb->priority = 1; 420 skb->priority = 1;
427#ifdef CONFIG_NET_POLL_CONTROLLER 421 if (unlikely(netpoll_tx_running(slave_dev)))
428 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { 422 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
429 struct netpoll *np = bond->dev->npinfo->netpoll; 423 else
430 slave_dev->npinfo = bond->dev->npinfo;
431 slave_dev->priv_flags |= IFF_IN_NETPOLL;
432 netpoll_send_skb_on_dev(np, skb, slave_dev);
433 slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
434 } else
435#endif
436 dev_queue_xmit(skb); 424 dev_queue_xmit(skb);
437 425
438 return 0; 426 return 0;
@@ -1288,63 +1276,103 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1288} 1276}
1289 1277
1290#ifdef CONFIG_NET_POLL_CONTROLLER 1278#ifdef CONFIG_NET_POLL_CONTROLLER
1291/* 1279static inline int slave_enable_netpoll(struct slave *slave)
1292 * You must hold read lock on bond->lock before calling this.
1293 */
1294static bool slaves_support_netpoll(struct net_device *bond_dev)
1295{ 1280{
1296 struct bonding *bond = netdev_priv(bond_dev); 1281 struct netpoll *np;
1297 struct slave *slave; 1282 int err = 0;
1298 int i = 0;
1299 bool ret = true;
1300 1283
1301 bond_for_each_slave(bond, slave, i) { 1284 np = kzalloc(sizeof(*np), GFP_KERNEL);
1302 if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) || 1285 err = -ENOMEM;
1303 !slave->dev->netdev_ops->ndo_poll_controller) 1286 if (!np)
1304 ret = false; 1287 goto out;
1288
1289 np->dev = slave->dev;
1290 err = __netpoll_setup(np);
1291 if (err) {
1292 kfree(np);
1293 goto out;
1305 } 1294 }
1306 return i != 0 && ret; 1295 slave->np = np;
1296out:
1297 return err;
1298}
1299static inline void slave_disable_netpoll(struct slave *slave)
1300{
1301 struct netpoll *np = slave->np;
1302
1303 if (!np)
1304 return;
1305
1306 slave->np = NULL;
1307 synchronize_rcu_bh();
1308 __netpoll_cleanup(np);
1309 kfree(np);
1310}
1311static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
1312{
1313 if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
1314 return false;
1315 if (!slave_dev->netdev_ops->ndo_poll_controller)
1316 return false;
1317 return true;
1307} 1318}
1308 1319
1309static void bond_poll_controller(struct net_device *bond_dev) 1320static void bond_poll_controller(struct net_device *bond_dev)
1310{ 1321{
1311 struct bonding *bond = netdev_priv(bond_dev); 1322}
1323
1324static void __bond_netpoll_cleanup(struct bonding *bond)
1325{
1312 struct slave *slave; 1326 struct slave *slave;
1313 int i; 1327 int i;
1314 1328
1315 bond_for_each_slave(bond, slave, i) { 1329 bond_for_each_slave(bond, slave, i)
1316 if (slave->dev && IS_UP(slave->dev)) 1330 if (IS_UP(slave->dev))
1317 netpoll_poll_dev(slave->dev); 1331 slave_disable_netpoll(slave);
1318 }
1319} 1332}
1320
1321static void bond_netpoll_cleanup(struct net_device *bond_dev) 1333static void bond_netpoll_cleanup(struct net_device *bond_dev)
1322{ 1334{
1323 struct bonding *bond = netdev_priv(bond_dev); 1335 struct bonding *bond = netdev_priv(bond_dev);
1336
1337 read_lock(&bond->lock);
1338 __bond_netpoll_cleanup(bond);
1339 read_unlock(&bond->lock);
1340}
1341
1342static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1343{
1344 struct bonding *bond = netdev_priv(dev);
1324 struct slave *slave; 1345 struct slave *slave;
1325 const struct net_device_ops *ops; 1346 int i, err = 0;
1326 int i;
1327 1347
1328 read_lock(&bond->lock); 1348 read_lock(&bond->lock);
1329 bond_dev->npinfo = NULL;
1330 bond_for_each_slave(bond, slave, i) { 1349 bond_for_each_slave(bond, slave, i) {
1331 if (slave->dev) { 1350 err = slave_enable_netpoll(slave);
1332 ops = slave->dev->netdev_ops; 1351 if (err) {
1333 if (ops->ndo_netpoll_cleanup) 1352 __bond_netpoll_cleanup(bond);
1334 ops->ndo_netpoll_cleanup(slave->dev); 1353 break;
1335 else
1336 slave->dev->npinfo = NULL;
1337 } 1354 }
1338 } 1355 }
1339 read_unlock(&bond->lock); 1356 read_unlock(&bond->lock);
1357 return err;
1340} 1358}
1341 1359
1342#else 1360static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
1361{
1362 return bond->dev->npinfo;
1363}
1343 1364
1365#else
1366static inline int slave_enable_netpoll(struct slave *slave)
1367{
1368 return 0;
1369}
1370static inline void slave_disable_netpoll(struct slave *slave)
1371{
1372}
1344static void bond_netpoll_cleanup(struct net_device *bond_dev) 1373static void bond_netpoll_cleanup(struct net_device *bond_dev)
1345{ 1374{
1346} 1375}
1347
1348#endif 1376#endif
1349 1377
1350/*---------------------------------- IOCTL ----------------------------------*/ 1378/*---------------------------------- IOCTL ----------------------------------*/
@@ -1372,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond)
1372{ 1400{
1373 struct slave *slave; 1401 struct slave *slave;
1374 struct net_device *bond_dev = bond->dev; 1402 struct net_device *bond_dev = bond->dev;
1375 unsigned long features = bond_dev->features; 1403 u32 features = bond_dev->features;
1376 unsigned long vlan_features = 0; 1404 u32 vlan_features = 0;
1377 unsigned short max_hard_header_len = max((u16)ETH_HLEN, 1405 unsigned short max_hard_header_len = max((u16)ETH_HLEN,
1378 bond_dev->hard_header_len); 1406 bond_dev->hard_header_len);
1379 int i; 1407 int i;
@@ -1400,8 +1428,8 @@ static int bond_compute_features(struct bonding *bond)
1400 1428
1401done: 1429done:
1402 features |= (bond_dev->features & BOND_VLAN_FEATURES); 1430 features |= (bond_dev->features & BOND_VLAN_FEATURES);
1403 bond_dev->features = netdev_fix_features(features, NULL); 1431 bond_dev->features = netdev_fix_features(bond_dev, features);
1404 bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL); 1432 bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
1405 bond_dev->hard_header_len = max_hard_header_len; 1433 bond_dev->hard_header_len = max_hard_header_len;
1406 1434
1407 return 0; 1435 return 0;
@@ -1423,6 +1451,77 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
1423 bond->setup_by_slave = 1; 1451 bond->setup_by_slave = 1;
1424} 1452}
1425 1453
1454/* On bonding slaves other than the currently active slave, suppress
1455 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
1456 * ARP on active-backup slaves with arp_validate enabled.
1457 */
1458static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1459 struct slave *slave,
1460 struct bonding *bond)
1461{
1462 if (bond_is_slave_inactive(slave)) {
1463 if (slave_do_arp_validate(bond, slave) &&
1464 skb->protocol == __cpu_to_be16(ETH_P_ARP))
1465 return false;
1466
1467 if (bond->params.mode == BOND_MODE_ALB &&
1468 skb->pkt_type != PACKET_BROADCAST &&
1469 skb->pkt_type != PACKET_MULTICAST)
1470 return false;
1471
1472 if (bond->params.mode == BOND_MODE_8023AD &&
1473 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
1474 return false;
1475
1476 return true;
1477 }
1478 return false;
1479}
1480
1481static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1482{
1483 struct sk_buff *skb = *pskb;
1484 struct slave *slave;
1485 struct net_device *bond_dev;
1486 struct bonding *bond;
1487
1488 slave = bond_slave_get_rcu(skb->dev);
1489 bond_dev = ACCESS_ONCE(slave->dev->master);
1490 if (unlikely(!bond_dev))
1491 return RX_HANDLER_PASS;
1492
1493 skb = skb_share_check(skb, GFP_ATOMIC);
1494 if (unlikely(!skb))
1495 return RX_HANDLER_CONSUMED;
1496
1497 *pskb = skb;
1498
1499 bond = netdev_priv(bond_dev);
1500
1501 if (bond->params.arp_interval)
1502 slave->dev->last_rx = jiffies;
1503
1504 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1505 return RX_HANDLER_EXACT;
1506 }
1507
1508 skb->dev = bond_dev;
1509
1510 if (bond->params.mode == BOND_MODE_ALB &&
1511 bond_dev->priv_flags & IFF_BRIDGE_PORT &&
1512 skb->pkt_type == PACKET_HOST) {
1513
1514 if (unlikely(skb_cow_head(skb,
1515 skb->data - skb_mac_header(skb)))) {
1516 kfree_skb(skb);
1517 return RX_HANDLER_CONSUMED;
1518 }
1519 memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
1520 }
1521
1522 return RX_HANDLER_ANOTHER;
1523}
1524
1426/* enslave device <slave> to bond device <master> */ 1525/* enslave device <slave> to bond device <master> */
1427int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1526int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1428{ 1527{
@@ -1594,16 +1693,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1594 } 1693 }
1595 } 1694 }
1596 1695
1597 res = netdev_set_master(slave_dev, bond_dev); 1696 res = netdev_set_bond_master(slave_dev, bond_dev);
1598 if (res) { 1697 if (res) {
1599 pr_debug("Error %d calling netdev_set_master\n", res); 1698 pr_debug("Error %d calling netdev_set_bond_master\n", res);
1600 goto err_restore_mac; 1699 goto err_restore_mac;
1601 } 1700 }
1701 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
1702 new_slave);
1703 if (res) {
1704 pr_debug("Error %d calling netdev_rx_handler_register\n", res);
1705 goto err_unset_master;
1706 }
1707
1602 /* open the slave since the application closed it */ 1708 /* open the slave since the application closed it */
1603 res = dev_open(slave_dev); 1709 res = dev_open(slave_dev);
1604 if (res) { 1710 if (res) {
1605 pr_debug("Opening slave %s failed\n", slave_dev->name); 1711 pr_debug("Opening slave %s failed\n", slave_dev->name);
1606 goto err_unset_master; 1712 goto err_unreg_rxhandler;
1607 } 1713 }
1608 1714
1609 new_slave->dev = slave_dev; 1715 new_slave->dev = slave_dev;
@@ -1757,7 +1863,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1757 break; 1863 break;
1758 case BOND_MODE_TLB: 1864 case BOND_MODE_TLB:
1759 case BOND_MODE_ALB: 1865 case BOND_MODE_ALB:
1760 new_slave->state = BOND_STATE_ACTIVE; 1866 bond_set_active_slave(new_slave);
1761 bond_set_slave_inactive_flags(new_slave); 1867 bond_set_slave_inactive_flags(new_slave);
1762 bond_select_active_slave(bond); 1868 bond_select_active_slave(bond);
1763 break; 1869 break;
@@ -1765,7 +1871,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1765 pr_debug("This slave is always active in trunk mode\n"); 1871 pr_debug("This slave is always active in trunk mode\n");
1766 1872
1767 /* always active in trunk mode */ 1873 /* always active in trunk mode */
1768 new_slave->state = BOND_STATE_ACTIVE; 1874 bond_set_active_slave(new_slave);
1769 1875
1770 /* In trunking mode there is little meaning to curr_active_slave 1876 /* In trunking mode there is little meaning to curr_active_slave
1771 * anyway (it holds no special properties of the bond device), 1877 * anyway (it holds no special properties of the bond device),
@@ -1782,17 +1888,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1782 bond_set_carrier(bond); 1888 bond_set_carrier(bond);
1783 1889
1784#ifdef CONFIG_NET_POLL_CONTROLLER 1890#ifdef CONFIG_NET_POLL_CONTROLLER
1785 if (slaves_support_netpoll(bond_dev)) { 1891 slave_dev->npinfo = bond_netpoll_info(bond);
1786 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; 1892 if (slave_dev->npinfo) {
1787 if (bond_dev->npinfo) 1893 if (slave_enable_netpoll(new_slave)) {
1788 slave_dev->npinfo = bond_dev->npinfo; 1894 read_unlock(&bond->lock);
1789 } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { 1895 pr_info("Error, %s: master_dev is using netpoll, "
1790 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1896 "but new slave device does not support netpoll.\n",
1791 pr_info("New slave device %s does not support netpoll\n", 1897 bond_dev->name);
1792 slave_dev->name); 1898 res = -EBUSY;
1793 pr_info("Disabling netpoll support for %s\n", bond_dev->name); 1899 goto err_close;
1900 }
1794 } 1901 }
1795#endif 1902#endif
1903
1796 read_unlock(&bond->lock); 1904 read_unlock(&bond->lock);
1797 1905
1798 res = bond_create_slave_symlinks(bond_dev, slave_dev); 1906 res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1801,7 +1909,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1801 1909
1802 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1910 pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
1803 bond_dev->name, slave_dev->name, 1911 bond_dev->name, slave_dev->name,
1804 new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup", 1912 bond_is_active_slave(new_slave) ? "n active" : " backup",
1805 new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); 1913 new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
1806 1914
1807 /* enslave is successful */ 1915 /* enslave is successful */
@@ -1811,8 +1919,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1811err_close: 1919err_close:
1812 dev_close(slave_dev); 1920 dev_close(slave_dev);
1813 1921
1922err_unreg_rxhandler:
1923 netdev_rx_handler_unregister(slave_dev);
1924 synchronize_net();
1925
1814err_unset_master: 1926err_unset_master:
1815 netdev_set_master(slave_dev, NULL); 1927 netdev_set_bond_master(slave_dev, NULL);
1816 1928
1817err_restore_mac: 1929err_restore_mac:
1818 if (!bond->params.fail_over_mac) { 1930 if (!bond->params.fail_over_mac) {
@@ -1895,7 +2007,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1895 2007
1896 pr_info("%s: releasing %s interface %s\n", 2008 pr_info("%s: releasing %s interface %s\n",
1897 bond_dev->name, 2009 bond_dev->name,
1898 (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup", 2010 bond_is_active_slave(slave) ? "active" : "backup",
1899 slave_dev->name); 2011 slave_dev->name);
1900 2012
1901 oldcurrent = bond->curr_active_slave; 2013 oldcurrent = bond->curr_active_slave;
@@ -1992,19 +2104,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1992 netif_addr_unlock_bh(bond_dev); 2104 netif_addr_unlock_bh(bond_dev);
1993 } 2105 }
1994 2106
1995 netdev_set_master(slave_dev, NULL); 2107 netdev_rx_handler_unregister(slave_dev);
2108 synchronize_net();
2109 netdev_set_bond_master(slave_dev, NULL);
1996 2110
1997#ifdef CONFIG_NET_POLL_CONTROLLER 2111 slave_disable_netpoll(slave);
1998 read_lock_bh(&bond->lock);
1999
2000 if (slaves_support_netpoll(bond_dev))
2001 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
2002 read_unlock_bh(&bond->lock);
2003 if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
2004 slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
2005 else
2006 slave_dev->npinfo = NULL;
2007#endif
2008 2112
2009 /* close slave before restoring its mac address */ 2113 /* close slave before restoring its mac address */
2010 dev_close(slave_dev); 2114 dev_close(slave_dev);
@@ -2018,9 +2122,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2018 2122
2019 dev_set_mtu(slave_dev, slave->original_mtu); 2123 dev_set_mtu(slave_dev, slave->original_mtu);
2020 2124
2021 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | 2125 slave_dev->priv_flags &= ~IFF_BONDING;
2022 IFF_SLAVE_INACTIVE | IFF_BONDING |
2023 IFF_SLAVE_NEEDARP);
2024 2126
2025 kfree(slave); 2127 kfree(slave);
2026 2128
@@ -2039,6 +2141,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
2039 2141
2040 ret = bond_release(bond_dev, slave_dev); 2142 ret = bond_release(bond_dev, slave_dev);
2041 if ((ret == 0) && (bond->slave_cnt == 0)) { 2143 if ((ret == 0) && (bond->slave_cnt == 0)) {
2144 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2042 pr_info("%s: destroying bond %s.\n", 2145 pr_info("%s: destroying bond %s.\n",
2043 bond_dev->name, bond_dev->name); 2146 bond_dev->name, bond_dev->name);
2044 unregister_netdevice(bond_dev); 2147 unregister_netdevice(bond_dev);
@@ -2114,7 +2217,11 @@ static int bond_release_all(struct net_device *bond_dev)
2114 netif_addr_unlock_bh(bond_dev); 2217 netif_addr_unlock_bh(bond_dev);
2115 } 2218 }
2116 2219
2117 netdev_set_master(slave_dev, NULL); 2220 netdev_rx_handler_unregister(slave_dev);
2221 synchronize_net();
2222 netdev_set_bond_master(slave_dev, NULL);
2223
2224 slave_disable_netpoll(slave);
2118 2225
2119 /* close slave before restoring its mac address */ 2226 /* close slave before restoring its mac address */
2120 dev_close(slave_dev); 2227 dev_close(slave_dev);
@@ -2126,9 +2233,6 @@ static int bond_release_all(struct net_device *bond_dev)
2126 dev_set_mac_address(slave_dev, &addr); 2233 dev_set_mac_address(slave_dev, &addr);
2127 } 2234 }
2128 2235
2129 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
2130 IFF_SLAVE_INACTIVE);
2131
2132 kfree(slave); 2236 kfree(slave);
2133 2237
2134 /* re-acquire the lock before getting the next slave */ 2238 /* re-acquire the lock before getting the next slave */
@@ -2242,7 +2346,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2242 res = 0; 2346 res = 0;
2243 strcpy(info->slave_name, slave->dev->name); 2347 strcpy(info->slave_name, slave->dev->name);
2244 info->link = slave->link; 2348 info->link = slave->link;
2245 info->state = slave->state; 2349 info->state = bond_slave_state(slave);
2246 info->link_failure_count = slave->link_failure_count; 2350 info->link_failure_count = slave->link_failure_count;
2247 break; 2351 break;
2248 } 2352 }
@@ -2281,7 +2385,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2281 bond->dev->name, 2385 bond->dev->name,
2282 (bond->params.mode == 2386 (bond->params.mode ==
2283 BOND_MODE_ACTIVEBACKUP) ? 2387 BOND_MODE_ACTIVEBACKUP) ?
2284 ((slave->state == BOND_STATE_ACTIVE) ? 2388 (bond_is_active_slave(slave) ?
2285 "active " : "backup ") : "", 2389 "active " : "backup ") : "",
2286 slave->dev->name, 2390 slave->dev->name,
2287 bond->params.downdelay * bond->params.miimon); 2391 bond->params.downdelay * bond->params.miimon);
@@ -2372,13 +2476,13 @@ static void bond_miimon_commit(struct bonding *bond)
2372 2476
2373 if (bond->params.mode == BOND_MODE_8023AD) { 2477 if (bond->params.mode == BOND_MODE_8023AD) {
2374 /* prevent it from being the active one */ 2478 /* prevent it from being the active one */
2375 slave->state = BOND_STATE_BACKUP; 2479 bond_set_backup_slave(slave);
2376 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 2480 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2377 /* make it immediately active */ 2481 /* make it immediately active */
2378 slave->state = BOND_STATE_ACTIVE; 2482 bond_set_active_slave(slave);
2379 } else if (slave != bond->primary_slave) { 2483 } else if (slave != bond->primary_slave) {
2380 /* prevent it from being the active one */ 2484 /* prevent it from being the active one */
2381 slave->state = BOND_STATE_BACKUP; 2485 bond_set_backup_slave(slave);
2382 } 2486 }
2383 2487
2384 bond_update_speed_duplex(slave); 2488 bond_update_speed_duplex(slave);
@@ -2571,11 +2675,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2571 2675
2572static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2676static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2573{ 2677{
2574 int i, vlan_id, rv; 2678 int i, vlan_id;
2575 __be32 *targets = bond->params.arp_targets; 2679 __be32 *targets = bond->params.arp_targets;
2576 struct vlan_entry *vlan; 2680 struct vlan_entry *vlan;
2577 struct net_device *vlan_dev; 2681 struct net_device *vlan_dev;
2578 struct flowi fl;
2579 struct rtable *rt; 2682 struct rtable *rt;
2580 2683
2581 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 2684 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
@@ -2594,15 +2697,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2594 * determine which VLAN interface would be used, so we 2697 * determine which VLAN interface would be used, so we
2595 * can tag the ARP with the proper VLAN tag. 2698 * can tag the ARP with the proper VLAN tag.
2596 */ 2699 */
2597 memset(&fl, 0, sizeof(fl)); 2700 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2598 fl.fl4_dst = targets[i]; 2701 RTO_ONLINK, 0);
2599 fl.fl4_tos = RTO_ONLINK; 2702 if (IS_ERR(rt)) {
2600
2601 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
2602 if (rv) {
2603 if (net_ratelimit()) { 2703 if (net_ratelimit()) {
2604 pr_warning("%s: no route to arp_ip_target %pI4\n", 2704 pr_warning("%s: no route to arp_ip_target %pI4\n",
2605 bond->dev->name, &fl.fl4_dst); 2705 bond->dev->name, &targets[i]);
2606 } 2706 }
2607 continue; 2707 continue;
2608 } 2708 }
@@ -2638,7 +2738,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2638 2738
2639 if (net_ratelimit()) { 2739 if (net_ratelimit()) {
2640 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2740 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2641 bond->dev->name, &fl.fl4_dst, 2741 bond->dev->name, &targets[i],
2642 rt->dst.dev ? rt->dst.dev->name : "NULL"); 2742 rt->dst.dev ? rt->dst.dev->name : "NULL");
2643 } 2743 }
2644 ip_rt_put(rt); 2744 ip_rt_put(rt);
@@ -2756,7 +2856,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2756 memcpy(&tip, arp_ptr, 4); 2856 memcpy(&tip, arp_ptr, 4);
2757 2857
2758 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", 2858 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
2759 bond->dev->name, slave->dev->name, slave->state, 2859 bond->dev->name, slave->dev->name, bond_slave_state(slave),
2760 bond->params.arp_validate, slave_do_arp_validate(bond, slave), 2860 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2761 &sip, &tip); 2861 &sip, &tip);
2762 2862
@@ -2768,7 +2868,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2768 * the active, through one switch, the router, then the other 2868 * the active, through one switch, the router, then the other
2769 * switch before reaching the backup. 2869 * switch before reaching the backup.
2770 */ 2870 */
2771 if (slave->state == BOND_STATE_ACTIVE) 2871 if (bond_is_active_slave(slave))
2772 bond_validate_arp(bond, slave, sip, tip); 2872 bond_validate_arp(bond, slave, sip, tip);
2773 else 2873 else
2774 bond_validate_arp(bond, slave, tip, sip); 2874 bond_validate_arp(bond, slave, tip, sip);
@@ -2830,7 +2930,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2830 slave->dev->last_rx + delta_in_ticks)) { 2930 slave->dev->last_rx + delta_in_ticks)) {
2831 2931
2832 slave->link = BOND_LINK_UP; 2932 slave->link = BOND_LINK_UP;
2833 slave->state = BOND_STATE_ACTIVE; 2933 bond_set_active_slave(slave);
2834 2934
2835 /* primary_slave has no meaning in round-robin 2935 /* primary_slave has no meaning in round-robin
2836 * mode. the window of a slave being up and 2936 * mode. the window of a slave being up and
@@ -2863,7 +2963,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2863 slave->dev->last_rx + 2 * delta_in_ticks)) { 2963 slave->dev->last_rx + 2 * delta_in_ticks)) {
2864 2964
2865 slave->link = BOND_LINK_DOWN; 2965 slave->link = BOND_LINK_DOWN;
2866 slave->state = BOND_STATE_BACKUP; 2966 bond_set_backup_slave(slave);
2867 2967
2868 if (slave->link_failure_count < UINT_MAX) 2968 if (slave->link_failure_count < UINT_MAX)
2869 slave->link_failure_count++; 2969 slave->link_failure_count++;
@@ -2957,7 +3057,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2957 * gives each slave a chance to tx/rx traffic 3057 * gives each slave a chance to tx/rx traffic
2958 * before being taken out 3058 * before being taken out
2959 */ 3059 */
2960 if (slave->state == BOND_STATE_BACKUP && 3060 if (!bond_is_active_slave(slave) &&
2961 !bond->current_arp_slave && 3061 !bond->current_arp_slave &&
2962 !time_in_range(jiffies, 3062 !time_in_range(jiffies,
2963 slave_last_rx(bond, slave) - delta_in_ticks, 3063 slave_last_rx(bond, slave) - delta_in_ticks,
@@ -2974,7 +3074,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2974 * the bond has an IP address) 3074 * the bond has an IP address)
2975 */ 3075 */
2976 trans_start = dev_trans_start(slave->dev); 3076 trans_start = dev_trans_start(slave->dev);
2977 if ((slave->state == BOND_STATE_ACTIVE) && 3077 if (bond_is_active_slave(slave) &&
2978 (!time_in_range(jiffies, 3078 (!time_in_range(jiffies,
2979 trans_start - delta_in_ticks, 3079 trans_start - delta_in_ticks,
2980 trans_start + 2 * delta_in_ticks) || 3080 trans_start + 2 * delta_in_ticks) ||
@@ -3182,299 +3282,6 @@ out:
3182 read_unlock(&bond->lock); 3282 read_unlock(&bond->lock);
3183} 3283}
3184 3284
3185/*------------------------------ proc/seq_file-------------------------------*/
3186
3187#ifdef CONFIG_PROC_FS
3188
3189static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3190 __acquires(RCU)
3191 __acquires(&bond->lock)
3192{
3193 struct bonding *bond = seq->private;
3194 loff_t off = 0;
3195 struct slave *slave;
3196 int i;
3197
3198 /* make sure the bond won't be taken away */
3199 rcu_read_lock();
3200 read_lock(&bond->lock);
3201
3202 if (*pos == 0)
3203 return SEQ_START_TOKEN;
3204
3205 bond_for_each_slave(bond, slave, i) {
3206 if (++off == *pos)
3207 return slave;
3208 }
3209
3210 return NULL;
3211}
3212
3213static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3214{
3215 struct bonding *bond = seq->private;
3216 struct slave *slave = v;
3217
3218 ++*pos;
3219 if (v == SEQ_START_TOKEN)
3220 return bond->first_slave;
3221
3222 slave = slave->next;
3223
3224 return (slave == bond->first_slave) ? NULL : slave;
3225}
3226
3227static void bond_info_seq_stop(struct seq_file *seq, void *v)
3228 __releases(&bond->lock)
3229 __releases(RCU)
3230{
3231 struct bonding *bond = seq->private;
3232
3233 read_unlock(&bond->lock);
3234 rcu_read_unlock();
3235}
3236
3237static void bond_info_show_master(struct seq_file *seq)
3238{
3239 struct bonding *bond = seq->private;
3240 struct slave *curr;
3241 int i;
3242
3243 read_lock(&bond->curr_slave_lock);
3244 curr = bond->curr_active_slave;
3245 read_unlock(&bond->curr_slave_lock);
3246
3247 seq_printf(seq, "Bonding Mode: %s",
3248 bond_mode_name(bond->params.mode));
3249
3250 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
3251 bond->params.fail_over_mac)
3252 seq_printf(seq, " (fail_over_mac %s)",
3253 fail_over_mac_tbl[bond->params.fail_over_mac].modename);
3254
3255 seq_printf(seq, "\n");
3256
3257 if (bond->params.mode == BOND_MODE_XOR ||
3258 bond->params.mode == BOND_MODE_8023AD) {
3259 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
3260 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
3261 bond->params.xmit_policy);
3262 }
3263
3264 if (USES_PRIMARY(bond->params.mode)) {
3265 seq_printf(seq, "Primary Slave: %s",
3266 (bond->primary_slave) ?
3267 bond->primary_slave->dev->name : "None");
3268 if (bond->primary_slave)
3269 seq_printf(seq, " (primary_reselect %s)",
3270 pri_reselect_tbl[bond->params.primary_reselect].modename);
3271
3272 seq_printf(seq, "\nCurrently Active Slave: %s\n",
3273 (curr) ? curr->dev->name : "None");
3274 }
3275
3276 seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
3277 "up" : "down");
3278 seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
3279 seq_printf(seq, "Up Delay (ms): %d\n",
3280 bond->params.updelay * bond->params.miimon);
3281 seq_printf(seq, "Down Delay (ms): %d\n",
3282 bond->params.downdelay * bond->params.miimon);
3283
3284
3285 /* ARP information */
3286 if (bond->params.arp_interval > 0) {
3287 int printed = 0;
3288 seq_printf(seq, "ARP Polling Interval (ms): %d\n",
3289 bond->params.arp_interval);
3290
3291 seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
3292
3293 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
3294 if (!bond->params.arp_targets[i])
3295 break;
3296 if (printed)
3297 seq_printf(seq, ",");
3298 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
3299 printed = 1;
3300 }
3301 seq_printf(seq, "\n");
3302 }
3303
3304 if (bond->params.mode == BOND_MODE_8023AD) {
3305 struct ad_info ad_info;
3306
3307 seq_puts(seq, "\n802.3ad info\n");
3308 seq_printf(seq, "LACP rate: %s\n",
3309 (bond->params.lacp_fast) ? "fast" : "slow");
3310 seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
3311 ad_select_tbl[bond->params.ad_select].modename);
3312
3313 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
3314 seq_printf(seq, "bond %s has no active aggregator\n",
3315 bond->dev->name);
3316 } else {
3317 seq_printf(seq, "Active Aggregator Info:\n");
3318
3319 seq_printf(seq, "\tAggregator ID: %d\n",
3320 ad_info.aggregator_id);
3321 seq_printf(seq, "\tNumber of ports: %d\n",
3322 ad_info.ports);
3323 seq_printf(seq, "\tActor Key: %d\n",
3324 ad_info.actor_key);
3325 seq_printf(seq, "\tPartner Key: %d\n",
3326 ad_info.partner_key);
3327 seq_printf(seq, "\tPartner Mac Address: %pM\n",
3328 ad_info.partner_system);
3329 }
3330 }
3331}
3332
3333static void bond_info_show_slave(struct seq_file *seq,
3334 const struct slave *slave)
3335{
3336 struct bonding *bond = seq->private;
3337
3338 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
3339 seq_printf(seq, "MII Status: %s\n",
3340 (slave->link == BOND_LINK_UP) ? "up" : "down");
3341 seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
3342 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
3343 seq_printf(seq, "Link Failure Count: %u\n",
3344 slave->link_failure_count);
3345
3346 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
3347
3348 if (bond->params.mode == BOND_MODE_8023AD) {
3349 const struct aggregator *agg
3350 = SLAVE_AD_INFO(slave).port.aggregator;
3351
3352 if (agg)
3353 seq_printf(seq, "Aggregator ID: %d\n",
3354 agg->aggregator_identifier);
3355 else
3356 seq_puts(seq, "Aggregator ID: N/A\n");
3357 }
3358 seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
3359}
3360
3361static int bond_info_seq_show(struct seq_file *seq, void *v)
3362{
3363 if (v == SEQ_START_TOKEN) {
3364 seq_printf(seq, "%s\n", version);
3365 bond_info_show_master(seq);
3366 } else
3367 bond_info_show_slave(seq, v);
3368
3369 return 0;
3370}
3371
3372static const struct seq_operations bond_info_seq_ops = {
3373 .start = bond_info_seq_start,
3374 .next = bond_info_seq_next,
3375 .stop = bond_info_seq_stop,
3376 .show = bond_info_seq_show,
3377};
3378
3379static int bond_info_open(struct inode *inode, struct file *file)
3380{
3381 struct seq_file *seq;
3382 struct proc_dir_entry *proc;
3383 int res;
3384
3385 res = seq_open(file, &bond_info_seq_ops);
3386 if (!res) {
3387 /* recover the pointer buried in proc_dir_entry data */
3388 seq = file->private_data;
3389 proc = PDE(inode);
3390 seq->private = proc->data;
3391 }
3392
3393 return res;
3394}
3395
3396static const struct file_operations bond_info_fops = {
3397 .owner = THIS_MODULE,
3398 .open = bond_info_open,
3399 .read = seq_read,
3400 .llseek = seq_lseek,
3401 .release = seq_release,
3402};
3403
3404static void bond_create_proc_entry(struct bonding *bond)
3405{
3406 struct net_device *bond_dev = bond->dev;
3407 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3408
3409 if (bn->proc_dir) {
3410 bond->proc_entry = proc_create_data(bond_dev->name,
3411 S_IRUGO, bn->proc_dir,
3412 &bond_info_fops, bond);
3413 if (bond->proc_entry == NULL)
3414 pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
3415 DRV_NAME, bond_dev->name);
3416 else
3417 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
3418 }
3419}
3420
3421static void bond_remove_proc_entry(struct bonding *bond)
3422{
3423 struct net_device *bond_dev = bond->dev;
3424 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3425
3426 if (bn->proc_dir && bond->proc_entry) {
3427 remove_proc_entry(bond->proc_file_name, bn->proc_dir);
3428 memset(bond->proc_file_name, 0, IFNAMSIZ);
3429 bond->proc_entry = NULL;
3430 }
3431}
3432
3433/* Create the bonding directory under /proc/net, if doesn't exist yet.
3434 * Caller must hold rtnl_lock.
3435 */
3436static void __net_init bond_create_proc_dir(struct bond_net *bn)
3437{
3438 if (!bn->proc_dir) {
3439 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
3440 if (!bn->proc_dir)
3441 pr_warning("Warning: cannot create /proc/net/%s\n",
3442 DRV_NAME);
3443 }
3444}
3445
3446/* Destroy the bonding directory under /proc/net, if empty.
3447 * Caller must hold rtnl_lock.
3448 */
3449static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
3450{
3451 if (bn->proc_dir) {
3452 remove_proc_entry(DRV_NAME, bn->net->proc_net);
3453 bn->proc_dir = NULL;
3454 }
3455}
3456
3457#else /* !CONFIG_PROC_FS */
3458
3459static void bond_create_proc_entry(struct bonding *bond)
3460{
3461}
3462
3463static void bond_remove_proc_entry(struct bonding *bond)
3464{
3465}
3466
3467static inline void bond_create_proc_dir(struct bond_net *bn)
3468{
3469}
3470
3471static inline void bond_destroy_proc_dir(struct bond_net *bn)
3472{
3473}
3474
3475#endif /* CONFIG_PROC_FS */
3476
3477
3478/*-------------------------- netdev event handling --------------------------*/ 3285/*-------------------------- netdev event handling --------------------------*/
3479 3286
3480/* 3287/*
@@ -4331,7 +4138,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
4331 bond_for_each_slave_from(bond, slave, i, start_at) { 4138 bond_for_each_slave_from(bond, slave, i, start_at) {
4332 if (IS_UP(slave->dev) && 4139 if (IS_UP(slave->dev) &&
4333 (slave->link == BOND_LINK_UP) && 4140 (slave->link == BOND_LINK_UP) &&
4334 (slave->state == BOND_STATE_ACTIVE)) { 4141 bond_is_active_slave(slave)) {
4335 res = bond_dev_queue_xmit(bond, skb, slave->dev); 4142 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4336 break; 4143 break;
4337 } 4144 }
@@ -4408,7 +4215,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4408 bond_for_each_slave_from(bond, slave, i, start_at) { 4215 bond_for_each_slave_from(bond, slave, i, start_at) {
4409 if (IS_UP(slave->dev) && 4216 if (IS_UP(slave->dev) &&
4410 (slave->link == BOND_LINK_UP) && 4217 (slave->link == BOND_LINK_UP) &&
4411 (slave->state == BOND_STATE_ACTIVE)) { 4218 bond_is_active_slave(slave)) {
4412 res = bond_dev_queue_xmit(bond, skb, slave->dev); 4219 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4413 break; 4220 break;
4414 } 4221 }
@@ -4449,7 +4256,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4449 bond_for_each_slave_from(bond, slave, i, start_at) { 4256 bond_for_each_slave_from(bond, slave, i, start_at) {
4450 if (IS_UP(slave->dev) && 4257 if (IS_UP(slave->dev) &&
4451 (slave->link == BOND_LINK_UP) && 4258 (slave->link == BOND_LINK_UP) &&
4452 (slave->state == BOND_STATE_ACTIVE)) { 4259 bond_is_active_slave(slave)) {
4453 if (tx_dev) { 4260 if (tx_dev) {
4454 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 4261 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4455 if (!skb2) { 4262 if (!skb2) {
@@ -4537,11 +4344,18 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4537{ 4344{
4538 /* 4345 /*
4539 * This helper function exists to help dev_pick_tx get the correct 4346 * This helper function exists to help dev_pick_tx get the correct
4540 * destination queue. Using a helper function skips the a call to 4347 * destination queue. Using a helper function skips a call to
4541 * skb_tx_hash and will put the skbs in the queue we expect on their 4348 * skb_tx_hash and will put the skbs in the queue we expect on their
4542 * way down to the bonding driver. 4349 * way down to the bonding driver.
4543 */ 4350 */
4544 return skb->queue_mapping; 4351 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4352
4353 if (unlikely(txq >= dev->real_num_tx_queues)) {
4354 do
4355 txq -= dev->real_num_tx_queues;
4356 while (txq >= dev->real_num_tx_queues);
4357 }
4358 return txq;
4545} 4359}
4546 4360
4547static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 4361static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4603,11 +4417,9 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
4603 case BOND_MODE_BROADCAST: 4417 case BOND_MODE_BROADCAST:
4604 break; 4418 break;
4605 case BOND_MODE_8023AD: 4419 case BOND_MODE_8023AD:
4606 bond_set_master_3ad_flags(bond);
4607 bond_set_xmit_hash_policy(bond); 4420 bond_set_xmit_hash_policy(bond);
4608 break; 4421 break;
4609 case BOND_MODE_ALB: 4422 case BOND_MODE_ALB:
4610 bond_set_master_alb_flags(bond);
4611 /* FALLTHRU */ 4423 /* FALLTHRU */
4612 case BOND_MODE_TLB: 4424 case BOND_MODE_TLB:
4613 break; 4425 break;
@@ -4654,9 +4466,12 @@ static const struct net_device_ops bond_netdev_ops = {
4654 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4466 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4655 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4467 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4656#ifdef CONFIG_NET_POLL_CONTROLLER 4468#ifdef CONFIG_NET_POLL_CONTROLLER
4469 .ndo_netpoll_setup = bond_netpoll_setup,
4657 .ndo_netpoll_cleanup = bond_netpoll_cleanup, 4470 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
4658 .ndo_poll_controller = bond_poll_controller, 4471 .ndo_poll_controller = bond_poll_controller,
4659#endif 4472#endif
4473 .ndo_add_slave = bond_enslave,
4474 .ndo_del_slave = bond_release,
4660}; 4475};
4661 4476
4662static void bond_destructor(struct net_device *bond_dev) 4477static void bond_destructor(struct net_device *bond_dev)
@@ -4695,9 +4510,6 @@ static void bond_setup(struct net_device *bond_dev)
4695 bond_dev->priv_flags |= IFF_BONDING; 4510 bond_dev->priv_flags |= IFF_BONDING;
4696 bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 4511 bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
4697 4512
4698 if (bond->params.arp_interval)
4699 bond_dev->priv_flags |= IFF_MASTER_ARPMON;
4700
4701 /* At first, we block adding VLANs. That's the only way to 4513 /* At first, we block adding VLANs. That's the only way to
4702 * prevent problems that occur when adding VLANs over an 4514 * prevent problems that occur when adding VLANs over an
4703 * empty bond. The block will be removed once non-challenged 4515 * empty bond. The block will be removed once non-challenged
@@ -5166,8 +4978,6 @@ static int bond_init(struct net_device *bond_dev)
5166 4978
5167 bond_set_lockdep_class(bond_dev); 4979 bond_set_lockdep_class(bond_dev);
5168 4980
5169 netif_carrier_off(bond_dev);
5170
5171 bond_create_proc_entry(bond); 4981 bond_create_proc_entry(bond);
5172 list_add_tail(&bond->bond_list, &bn->dev_list); 4982 list_add_tail(&bond->bond_list, &bn->dev_list);
5173 4983
@@ -5237,6 +5047,8 @@ int bond_create(struct net *net, const char *name)
5237 5047
5238 res = register_netdevice(bond_dev); 5048 res = register_netdevice(bond_dev);
5239 5049
5050 netif_carrier_off(bond_dev);
5051
5240out: 5052out:
5241 rtnl_unlock(); 5053 rtnl_unlock();
5242 if (res < 0) 5054 if (res < 0)
@@ -5275,7 +5087,7 @@ static int __init bonding_init(void)
5275 int i; 5087 int i;
5276 int res; 5088 int res;
5277 5089
5278 pr_info("%s", version); 5090 pr_info("%s", bond_version);
5279 5091
5280 res = bond_check_params(&bonding_defaults); 5092 res = bond_check_params(&bonding_defaults);
5281 if (res) 5093 if (res)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
new file mode 100644
index 000000000000..c32ff55a34c1
--- /dev/null
+++ b/drivers/net/bonding/bond_procfs.c
@@ -0,0 +1,275 @@
1#include <linux/proc_fs.h>
2#include <net/net_namespace.h>
3#include <net/netns/generic.h>
4#include "bonding.h"
5
6
7extern const char *bond_mode_name(int mode);
8
9static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
10 __acquires(RCU)
11 __acquires(&bond->lock)
12{
13 struct bonding *bond = seq->private;
14 loff_t off = 0;
15 struct slave *slave;
16 int i;
17
18 /* make sure the bond won't be taken away */
19 rcu_read_lock();
20 read_lock(&bond->lock);
21
22 if (*pos == 0)
23 return SEQ_START_TOKEN;
24
25 bond_for_each_slave(bond, slave, i) {
26 if (++off == *pos)
27 return slave;
28 }
29
30 return NULL;
31}
32
33static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
34{
35 struct bonding *bond = seq->private;
36 struct slave *slave = v;
37
38 ++*pos;
39 if (v == SEQ_START_TOKEN)
40 return bond->first_slave;
41
42 slave = slave->next;
43
44 return (slave == bond->first_slave) ? NULL : slave;
45}
46
47static void bond_info_seq_stop(struct seq_file *seq, void *v)
48 __releases(&bond->lock)
49 __releases(RCU)
50{
51 struct bonding *bond = seq->private;
52
53 read_unlock(&bond->lock);
54 rcu_read_unlock();
55}
56
57static void bond_info_show_master(struct seq_file *seq)
58{
59 struct bonding *bond = seq->private;
60 struct slave *curr;
61 int i;
62
63 read_lock(&bond->curr_slave_lock);
64 curr = bond->curr_active_slave;
65 read_unlock(&bond->curr_slave_lock);
66
67 seq_printf(seq, "Bonding Mode: %s",
68 bond_mode_name(bond->params.mode));
69
70 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
71 bond->params.fail_over_mac)
72 seq_printf(seq, " (fail_over_mac %s)",
73 fail_over_mac_tbl[bond->params.fail_over_mac].modename);
74
75 seq_printf(seq, "\n");
76
77 if (bond->params.mode == BOND_MODE_XOR ||
78 bond->params.mode == BOND_MODE_8023AD) {
79 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
80 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
81 bond->params.xmit_policy);
82 }
83
84 if (USES_PRIMARY(bond->params.mode)) {
85 seq_printf(seq, "Primary Slave: %s",
86 (bond->primary_slave) ?
87 bond->primary_slave->dev->name : "None");
88 if (bond->primary_slave)
89 seq_printf(seq, " (primary_reselect %s)",
90 pri_reselect_tbl[bond->params.primary_reselect].modename);
91
92 seq_printf(seq, "\nCurrently Active Slave: %s\n",
93 (curr) ? curr->dev->name : "None");
94 }
95
96 seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
97 "up" : "down");
98 seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
99 seq_printf(seq, "Up Delay (ms): %d\n",
100 bond->params.updelay * bond->params.miimon);
101 seq_printf(seq, "Down Delay (ms): %d\n",
102 bond->params.downdelay * bond->params.miimon);
103
104
105 /* ARP information */
106 if (bond->params.arp_interval > 0) {
107 int printed = 0;
108 seq_printf(seq, "ARP Polling Interval (ms): %d\n",
109 bond->params.arp_interval);
110
111 seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
112
113 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
114 if (!bond->params.arp_targets[i])
115 break;
116 if (printed)
117 seq_printf(seq, ",");
118 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
119 printed = 1;
120 }
121 seq_printf(seq, "\n");
122 }
123
124 if (bond->params.mode == BOND_MODE_8023AD) {
125 struct ad_info ad_info;
126
127 seq_puts(seq, "\n802.3ad info\n");
128 seq_printf(seq, "LACP rate: %s\n",
129 (bond->params.lacp_fast) ? "fast" : "slow");
130 seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
131 ad_select_tbl[bond->params.ad_select].modename);
132
133 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
134 seq_printf(seq, "bond %s has no active aggregator\n",
135 bond->dev->name);
136 } else {
137 seq_printf(seq, "Active Aggregator Info:\n");
138
139 seq_printf(seq, "\tAggregator ID: %d\n",
140 ad_info.aggregator_id);
141 seq_printf(seq, "\tNumber of ports: %d\n",
142 ad_info.ports);
143 seq_printf(seq, "\tActor Key: %d\n",
144 ad_info.actor_key);
145 seq_printf(seq, "\tPartner Key: %d\n",
146 ad_info.partner_key);
147 seq_printf(seq, "\tPartner Mac Address: %pM\n",
148 ad_info.partner_system);
149 }
150 }
151}
152
153static void bond_info_show_slave(struct seq_file *seq,
154 const struct slave *slave)
155{
156 struct bonding *bond = seq->private;
157
158 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
159 seq_printf(seq, "MII Status: %s\n",
160 (slave->link == BOND_LINK_UP) ? "up" : "down");
161 seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
162 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
163 seq_printf(seq, "Link Failure Count: %u\n",
164 slave->link_failure_count);
165
166 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
167
168 if (bond->params.mode == BOND_MODE_8023AD) {
169 const struct aggregator *agg
170 = SLAVE_AD_INFO(slave).port.aggregator;
171
172 if (agg)
173 seq_printf(seq, "Aggregator ID: %d\n",
174 agg->aggregator_identifier);
175 else
176 seq_puts(seq, "Aggregator ID: N/A\n");
177 }
178 seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
179}
180
181static int bond_info_seq_show(struct seq_file *seq, void *v)
182{
183 if (v == SEQ_START_TOKEN) {
184 seq_printf(seq, "%s\n", bond_version);
185 bond_info_show_master(seq);
186 } else
187 bond_info_show_slave(seq, v);
188
189 return 0;
190}
191
192static const struct seq_operations bond_info_seq_ops = {
193 .start = bond_info_seq_start,
194 .next = bond_info_seq_next,
195 .stop = bond_info_seq_stop,
196 .show = bond_info_seq_show,
197};
198
199static int bond_info_open(struct inode *inode, struct file *file)
200{
201 struct seq_file *seq;
202 struct proc_dir_entry *proc;
203 int res;
204
205 res = seq_open(file, &bond_info_seq_ops);
206 if (!res) {
207 /* recover the pointer buried in proc_dir_entry data */
208 seq = file->private_data;
209 proc = PDE(inode);
210 seq->private = proc->data;
211 }
212
213 return res;
214}
215
216static const struct file_operations bond_info_fops = {
217 .owner = THIS_MODULE,
218 .open = bond_info_open,
219 .read = seq_read,
220 .llseek = seq_lseek,
221 .release = seq_release,
222};
223
224void bond_create_proc_entry(struct bonding *bond)
225{
226 struct net_device *bond_dev = bond->dev;
227 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
228
229 if (bn->proc_dir) {
230 bond->proc_entry = proc_create_data(bond_dev->name,
231 S_IRUGO, bn->proc_dir,
232 &bond_info_fops, bond);
233 if (bond->proc_entry == NULL)
234 pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
235 DRV_NAME, bond_dev->name);
236 else
237 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
238 }
239}
240
241void bond_remove_proc_entry(struct bonding *bond)
242{
243 struct net_device *bond_dev = bond->dev;
244 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
245
246 if (bn->proc_dir && bond->proc_entry) {
247 remove_proc_entry(bond->proc_file_name, bn->proc_dir);
248 memset(bond->proc_file_name, 0, IFNAMSIZ);
249 bond->proc_entry = NULL;
250 }
251}
252
253/* Create the bonding directory under /proc/net, if doesn't exist yet.
254 * Caller must hold rtnl_lock.
255 */
256void __net_init bond_create_proc_dir(struct bond_net *bn)
257{
258 if (!bn->proc_dir) {
259 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
260 if (!bn->proc_dir)
261 pr_warning("Warning: cannot create /proc/net/%s\n",
262 DRV_NAME);
263 }
264}
265
266/* Destroy the bonding directory under /proc/net, if empty.
267 * Caller must hold rtnl_lock.
268 */
269void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
270{
271 if (bn->proc_dir) {
272 remove_proc_entry(DRV_NAME, bn->net->proc_net);
273 bn->proc_dir = NULL;
274 }
275}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174c5380..de87aea6d01a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -118,7 +118,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
118 pr_info("%s is being created...\n", ifname); 118 pr_info("%s is being created...\n", ifname);
119 rv = bond_create(net, ifname); 119 rv = bond_create(net, ifname);
120 if (rv) { 120 if (rv) {
121 pr_info("Bond creation failed.\n"); 121 if (rv == -EEXIST)
122 pr_info("%s already exists.\n", ifname);
123 else
124 pr_info("%s creation failed.\n", ifname);
122 res = rv; 125 res = rv;
123 } 126 }
124 } else if (command[0] == '-') { 127 } else if (command[0] == '-') {
@@ -322,11 +325,6 @@ static ssize_t bonding_store_mode(struct device *d,
322 ret = -EINVAL; 325 ret = -EINVAL;
323 goto out; 326 goto out;
324 } 327 }
325 if (bond->params.mode == BOND_MODE_8023AD)
326 bond_unset_master_3ad_flags(bond);
327
328 if (bond->params.mode == BOND_MODE_ALB)
329 bond_unset_master_alb_flags(bond);
330 328
331 bond->params.mode = new_value; 329 bond->params.mode = new_value;
332 bond_set_mode_ops(bond, bond->params.mode); 330 bond_set_mode_ops(bond, bond->params.mode);
@@ -527,8 +525,6 @@ static ssize_t bonding_store_arp_interval(struct device *d,
527 pr_info("%s: Setting ARP monitoring interval to %d.\n", 525 pr_info("%s: Setting ARP monitoring interval to %d.\n",
528 bond->dev->name, new_value); 526 bond->dev->name, new_value);
529 bond->params.arp_interval = new_value; 527 bond->params.arp_interval = new_value;
530 if (bond->params.arp_interval)
531 bond->dev->priv_flags |= IFF_MASTER_ARPMON;
532 if (bond->params.miimon) { 528 if (bond->params.miimon) {
533 pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", 529 pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
534 bond->dev->name, bond->dev->name); 530 bond->dev->name, bond->dev->name);
@@ -1004,7 +1000,6 @@ static ssize_t bonding_store_miimon(struct device *d,
1004 pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", 1000 pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
1005 bond->dev->name); 1001 bond->dev->name);
1006 bond->params.arp_interval = 0; 1002 bond->params.arp_interval = 0;
1007 bond->dev->priv_flags &= ~IFF_MASTER_ARPMON;
1008 if (bond->params.arp_validate) { 1003 if (bond->params.arp_validate) {
1009 bond_unregister_arp(bond); 1004 bond_unregister_arp(bond);
1010 bond->params.arp_validate = 1005 bond->params.arp_validate =
@@ -1198,7 +1193,7 @@ static ssize_t bonding_store_carrier(struct device *d,
1198 bond->dev->name, new_value); 1193 bond->dev->name, new_value);
1199 } 1194 }
1200out: 1195out:
1201 return count; 1196 return ret;
1202} 1197}
1203static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, 1198static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
1204 bonding_show_carrier, bonding_store_carrier); 1199 bonding_show_carrier, bonding_store_carrier);
@@ -1587,15 +1582,15 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1587 } 1582 }
1588 1583
1589 bond_for_each_slave(bond, slave, i) { 1584 bond_for_each_slave(bond, slave, i) {
1590 if (slave->state == BOND_STATE_BACKUP) { 1585 if (!bond_is_active_slave(slave)) {
1591 if (new_value) 1586 if (new_value)
1592 slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE; 1587 slave->inactive = 0;
1593 else 1588 else
1594 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; 1589 slave->inactive = 1;
1595 } 1590 }
1596 } 1591 }
1597out: 1592out:
1598 return count; 1593 return ret;
1599} 1594}
1600static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, 1595static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
1601 bonding_show_slaves_active, bonding_store_slaves_active); 1596 bonding_show_slaves_active, bonding_store_slaves_active);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 31fe980e4e28..6b26962fd0ec 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -20,6 +20,7 @@
20#include <linux/if_bonding.h> 20#include <linux/if_bonding.h>
21#include <linux/cpumask.h> 21#include <linux/cpumask.h>
22#include <linux/in6.h> 22#include <linux/in6.h>
23#include <linux/netpoll.h>
23#include "bond_3ad.h" 24#include "bond_3ad.h"
24#include "bond_alb.h" 25#include "bond_alb.h"
25 26
@@ -28,6 +29,8 @@
28#define DRV_NAME "bonding" 29#define DRV_NAME "bonding"
29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 30#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
30 31
32#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
33
31#define BOND_MAX_ARP_TARGETS 16 34#define BOND_MAX_ARP_TARGETS 16
32 35
33#define IS_UP(dev) \ 36#define IS_UP(dev) \
@@ -52,7 +55,7 @@
52 (((slave)->dev->flags & IFF_UP) && \ 55 (((slave)->dev->flags & IFF_UP) && \
53 netif_running((slave)->dev) && \ 56 netif_running((slave)->dev) && \
54 ((slave)->link == BOND_LINK_UP) && \ 57 ((slave)->link == BOND_LINK_UP) && \
55 ((slave)->state == BOND_STATE_ACTIVE)) 58 bond_is_active_slave(slave))
56 59
57 60
58#define USES_PRIMARY(mode) \ 61#define USES_PRIMARY(mode) \
@@ -132,7 +135,7 @@ static inline void unblock_netpoll_tx(void)
132 135
133static inline int is_netpoll_tx_blocked(struct net_device *dev) 136static inline int is_netpoll_tx_blocked(struct net_device *dev)
134{ 137{
135 if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) 138 if (unlikely(netpoll_tx_running(dev)))
136 return atomic_read(&netpoll_block_tx); 139 return atomic_read(&netpoll_block_tx);
137 return 0; 140 return 0;
138} 141}
@@ -189,7 +192,9 @@ struct slave {
189 unsigned long last_arp_rx; 192 unsigned long last_arp_rx;
190 s8 link; /* one of BOND_LINK_XXXX */ 193 s8 link; /* one of BOND_LINK_XXXX */
191 s8 new_link; 194 s8 new_link;
192 s8 state; /* one of BOND_STATE_XXXX */ 195 u8 backup:1, /* indicates backup slave. Value corresponds with
196 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
197 inactive:1; /* indicates inactive slave */
193 u32 original_mtu; 198 u32 original_mtu;
194 u32 link_failure_count; 199 u32 link_failure_count;
195 u8 perm_hwaddr[ETH_ALEN]; 200 u8 perm_hwaddr[ETH_ALEN];
@@ -198,6 +203,9 @@ struct slave {
198 u16 queue_id; 203 u16 queue_id;
199 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ 204 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
200 struct tlb_slave_info tlb_info; 205 struct tlb_slave_info tlb_info;
206#ifdef CONFIG_NET_POLL_CONTROLLER
207 struct netpoll *np;
208#endif
201}; 209};
202 210
203/* 211/*
@@ -260,12 +268,16 @@ struct bonding {
260#endif /* CONFIG_DEBUG_FS */ 268#endif /* CONFIG_DEBUG_FS */
261}; 269};
262 270
271#define bond_slave_get_rcu(dev) \
272 ((struct slave *) rcu_dereference(dev->rx_handler_data))
273
263/** 274/**
264 * Returns NULL if the net_device does not belong to any of the bond's slaves 275 * Returns NULL if the net_device does not belong to any of the bond's slaves
265 * 276 *
266 * Caller must hold bond lock for read 277 * Caller must hold bond lock for read
267 */ 278 */
268static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) 279static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
280 struct net_device *slave_dev)
269{ 281{
270 struct slave *slave = NULL; 282 struct slave *slave = NULL;
271 int i; 283 int i;
@@ -276,7 +288,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
276 } 288 }
277 } 289 }
278 290
279 return 0; 291 return NULL;
280} 292}
281 293
282static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 294static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,6 +306,26 @@ static inline bool bond_is_lb(const struct bonding *bond)
294 bond->params.mode == BOND_MODE_ALB); 306 bond->params.mode == BOND_MODE_ALB);
295} 307}
296 308
309static inline void bond_set_active_slave(struct slave *slave)
310{
311 slave->backup = 0;
312}
313
314static inline void bond_set_backup_slave(struct slave *slave)
315{
316 slave->backup = 1;
317}
318
319static inline int bond_slave_state(struct slave *slave)
320{
321 return slave->backup;
322}
323
324static inline bool bond_is_active_slave(struct slave *slave)
325{
326 return !bond_slave_state(slave);
327}
328
297#define BOND_PRI_RESELECT_ALWAYS 0 329#define BOND_PRI_RESELECT_ALWAYS 0
298#define BOND_PRI_RESELECT_BETTER 1 330#define BOND_PRI_RESELECT_BETTER 1
299#define BOND_PRI_RESELECT_FAILURE 2 331#define BOND_PRI_RESELECT_FAILURE 2
@@ -311,7 +343,7 @@ static inline bool bond_is_lb(const struct bonding *bond)
311static inline int slave_do_arp_validate(struct bonding *bond, 343static inline int slave_do_arp_validate(struct bonding *bond,
312 struct slave *slave) 344 struct slave *slave)
313{ 345{
314 return bond->params.arp_validate & (1 << slave->state); 346 return bond->params.arp_validate & (1 << bond_slave_state(slave));
315} 347}
316 348
317static inline unsigned long slave_last_rx(struct bonding *bond, 349static inline unsigned long slave_last_rx(struct bonding *bond,
@@ -323,41 +355,40 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
323 return slave->dev->last_rx; 355 return slave->dev->last_rx;
324} 356}
325 357
326static inline void bond_set_slave_inactive_flags(struct slave *slave) 358#ifdef CONFIG_NET_POLL_CONTROLLER
359static inline void bond_netpoll_send_skb(const struct slave *slave,
360 struct sk_buff *skb)
327{ 361{
328 struct bonding *bond = netdev_priv(slave->dev->master); 362 struct netpoll *np = slave->np;
329 if (!bond_is_lb(bond))
330 slave->state = BOND_STATE_BACKUP;
331 if (!bond->params.all_slaves_active)
332 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
333 if (slave_do_arp_validate(bond, slave))
334 slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
335}
336 363
337static inline void bond_set_slave_active_flags(struct slave *slave) 364 if (np)
338{ 365 netpoll_send_skb(np, skb);
339 slave->state = BOND_STATE_ACTIVE;
340 slave->dev->priv_flags &= ~(IFF_SLAVE_INACTIVE | IFF_SLAVE_NEEDARP);
341} 366}
342 367#else
343static inline void bond_set_master_3ad_flags(struct bonding *bond) 368static inline void bond_netpoll_send_skb(const struct slave *slave,
369 struct sk_buff *skb)
344{ 370{
345 bond->dev->priv_flags |= IFF_MASTER_8023AD;
346} 371}
372#endif
347 373
348static inline void bond_unset_master_3ad_flags(struct bonding *bond) 374static inline void bond_set_slave_inactive_flags(struct slave *slave)
349{ 375{
350 bond->dev->priv_flags &= ~IFF_MASTER_8023AD; 376 struct bonding *bond = netdev_priv(slave->dev->master);
377 if (!bond_is_lb(bond))
378 bond_set_backup_slave(slave);
379 if (!bond->params.all_slaves_active)
380 slave->inactive = 1;
351} 381}
352 382
353static inline void bond_set_master_alb_flags(struct bonding *bond) 383static inline void bond_set_slave_active_flags(struct slave *slave)
354{ 384{
355 bond->dev->priv_flags |= IFF_MASTER_ALB; 385 bond_set_active_slave(slave);
386 slave->inactive = 0;
356} 387}
357 388
358static inline void bond_unset_master_alb_flags(struct bonding *bond) 389static inline bool bond_is_slave_inactive(struct slave *slave)
359{ 390{
360 bond->dev->priv_flags &= ~IFF_MASTER_ALB; 391 return slave->inactive;
361} 392}
362 393
363struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 394struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
@@ -393,6 +424,30 @@ struct bond_net {
393#endif 424#endif
394}; 425};
395 426
427#ifdef CONFIG_PROC_FS
428void bond_create_proc_entry(struct bonding *bond);
429void bond_remove_proc_entry(struct bonding *bond);
430void bond_create_proc_dir(struct bond_net *bn);
431void bond_destroy_proc_dir(struct bond_net *bn);
432#else
433static inline void bond_create_proc_entry(struct bonding *bond)
434{
435}
436
437static inline void bond_remove_proc_entry(struct bonding *bond)
438{
439}
440
441static inline void bond_create_proc_dir(struct bond_net *bn)
442{
443}
444
445static inline void bond_destroy_proc_dir(struct bond_net *bn)
446{
447}
448#endif
449
450
396/* exported from bond_main.c */ 451/* exported from bond_main.c */
397extern int bond_net_id; 452extern int bond_net_id;
398extern const struct bond_parm_tbl bond_lacp_tbl[]; 453extern const struct bond_parm_tbl bond_lacp_tbl[];
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 5dec456fd4a4..1d699e3df547 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -115,6 +115,8 @@ source "drivers/net/can/mscan/Kconfig"
115 115
116source "drivers/net/can/sja1000/Kconfig" 116source "drivers/net/can/sja1000/Kconfig"
117 117
118source "drivers/net/can/c_can/Kconfig"
119
118source "drivers/net/can/usb/Kconfig" 120source "drivers/net/can/usb/Kconfig"
119 121
120source "drivers/net/can/softing/Kconfig" 122source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 53c82a71778e..24ebfe8d758a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -13,6 +13,7 @@ obj-y += softing/
13 13
14obj-$(CONFIG_CAN_SJA1000) += sja1000/ 14obj-$(CONFIG_CAN_SJA1000) += sja1000/
15obj-$(CONFIG_CAN_MSCAN) += mscan/ 15obj-$(CONFIG_CAN_MSCAN) += mscan/
16obj-$(CONFIG_CAN_C_CAN) += c_can/
16obj-$(CONFIG_CAN_AT91) += at91_can.o 17obj-$(CONFIG_CAN_AT91) += at91_can.o
17obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 18obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
18obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 19obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644
index 000000000000..ffb9773d102d
--- /dev/null
+++ b/drivers/net/can/c_can/Kconfig
@@ -0,0 +1,15 @@
1menuconfig CAN_C_CAN
2 tristate "Bosch C_CAN devices"
3 depends on CAN_DEV && HAS_IOMEM
4
5if CAN_C_CAN
6
7config CAN_C_CAN_PLATFORM
8 tristate "Generic Platform Bus based C_CAN driver"
9 ---help---
10 This driver adds support for the C_CAN chips connected to
11 the "platform bus" (Linux abstraction for directly to the
12 processor attached devices) which can be found on various
13 boards from ST Microelectronics (http://www.st.com)
14 like the SPEAr1310 and SPEAr320 evaluation boards.
15endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644
index 000000000000..9273f6d5c4b7
--- /dev/null
+++ b/drivers/net/can/c_can/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Bosch C_CAN controller drivers.
3#
4
5obj-$(CONFIG_CAN_C_CAN) += c_can.o
6obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
7
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644
index 000000000000..14050786218a
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.c
@@ -0,0 +1,1158 @@
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
13 * written by:
14 * Copyright
15 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
16 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
17 *
18 * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
19 * Bosch C_CAN user manual can be obtained from:
20 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
21 * users_manual_c_can.pdf
22 *
23 * This file is licensed under the terms of the GNU General Public
24 * License version 2. This program is licensed "as is" without any
25 * warranty of any kind, whether express or implied.
26 */
27
28#include <linux/kernel.h>
29#include <linux/version.h>
30#include <linux/module.h>
31#include <linux/interrupt.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/if_arp.h>
35#include <linux/if_ether.h>
36#include <linux/list.h>
37#include <linux/delay.h>
38#include <linux/io.h>
39
40#include <linux/can.h>
41#include <linux/can/dev.h>
42#include <linux/can/error.h>
43
44#include "c_can.h"
45
46/* control register */
47#define CONTROL_TEST BIT(7)
48#define CONTROL_CCE BIT(6)
49#define CONTROL_DISABLE_AR BIT(5)
50#define CONTROL_ENABLE_AR (0 << 5)
51#define CONTROL_EIE BIT(3)
52#define CONTROL_SIE BIT(2)
53#define CONTROL_IE BIT(1)
54#define CONTROL_INIT BIT(0)
55
56/* test register */
57#define TEST_RX BIT(7)
58#define TEST_TX1 BIT(6)
59#define TEST_TX2 BIT(5)
60#define TEST_LBACK BIT(4)
61#define TEST_SILENT BIT(3)
62#define TEST_BASIC BIT(2)
63
64/* status register */
65#define STATUS_BOFF BIT(7)
66#define STATUS_EWARN BIT(6)
67#define STATUS_EPASS BIT(5)
68#define STATUS_RXOK BIT(4)
69#define STATUS_TXOK BIT(3)
70
71/* error counter register */
72#define ERR_CNT_TEC_MASK 0xff
73#define ERR_CNT_TEC_SHIFT 0
74#define ERR_CNT_REC_SHIFT 8
75#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
76#define ERR_CNT_RP_SHIFT 15
77#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
78
79/* bit-timing register */
80#define BTR_BRP_MASK 0x3f
81#define BTR_BRP_SHIFT 0
82#define BTR_SJW_SHIFT 6
83#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
84#define BTR_TSEG1_SHIFT 8
85#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
86#define BTR_TSEG2_SHIFT 12
87#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
88
89/* brp extension register */
90#define BRP_EXT_BRPE_MASK 0x0f
91#define BRP_EXT_BRPE_SHIFT 0
92
93/* IFx command request */
94#define IF_COMR_BUSY BIT(15)
95
96/* IFx command mask */
97#define IF_COMM_WR BIT(7)
98#define IF_COMM_MASK BIT(6)
99#define IF_COMM_ARB BIT(5)
100#define IF_COMM_CONTROL BIT(4)
101#define IF_COMM_CLR_INT_PND BIT(3)
102#define IF_COMM_TXRQST BIT(2)
103#define IF_COMM_DATAA BIT(1)
104#define IF_COMM_DATAB BIT(0)
105#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
106 IF_COMM_CONTROL | IF_COMM_TXRQST | \
107 IF_COMM_DATAA | IF_COMM_DATAB)
108
109/* IFx arbitration */
110#define IF_ARB_MSGVAL BIT(15)
111#define IF_ARB_MSGXTD BIT(14)
112#define IF_ARB_TRANSMIT BIT(13)
113
114/* IFx message control */
115#define IF_MCONT_NEWDAT BIT(15)
116#define IF_MCONT_MSGLST BIT(14)
117#define IF_MCONT_CLR_MSGLST (0 << 14)
118#define IF_MCONT_INTPND BIT(13)
119#define IF_MCONT_UMASK BIT(12)
120#define IF_MCONT_TXIE BIT(11)
121#define IF_MCONT_RXIE BIT(10)
122#define IF_MCONT_RMTEN BIT(9)
123#define IF_MCONT_TXRQST BIT(8)
124#define IF_MCONT_EOB BIT(7)
125#define IF_MCONT_DLC_MASK 0xf
126
127/*
128 * IFx register masks:
129 * allow easy operation on 16-bit registers when the
130 * argument is 32-bit instead
131 */
132#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
133#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
134
135/* message object split */
136#define C_CAN_NO_OF_OBJECTS 32
137#define C_CAN_MSG_OBJ_RX_NUM 16
138#define C_CAN_MSG_OBJ_TX_NUM 16
139
140#define C_CAN_MSG_OBJ_RX_FIRST 1
141#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
142 C_CAN_MSG_OBJ_RX_NUM - 1)
143
144#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
145#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
146 C_CAN_MSG_OBJ_TX_NUM - 1)
147
148#define C_CAN_MSG_OBJ_RX_SPLIT 9
149#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
150
151#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
152#define RECEIVE_OBJECT_BITS 0x0000ffff
153
154/* status interrupt */
155#define STATUS_INTERRUPT 0x8000
156
157/* global interrupt masks */
158#define ENABLE_ALL_INTERRUPTS 1
159#define DISABLE_ALL_INTERRUPTS 0
160
161/* minimum timeout for checking BUSY status */
162#define MIN_TIMEOUT_VALUE 6
163
164/* napi related */
165#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
166
167/* c_can lec values */
168enum c_can_lec_type {
169 LEC_NO_ERROR = 0,
170 LEC_STUFF_ERROR,
171 LEC_FORM_ERROR,
172 LEC_ACK_ERROR,
173 LEC_BIT1_ERROR,
174 LEC_BIT0_ERROR,
175 LEC_CRC_ERROR,
176 LEC_UNUSED,
177};
178
179/*
180 * c_can error types:
181 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
182 */
183enum c_can_bus_error_types {
184 C_CAN_NO_ERROR = 0,
185 C_CAN_BUS_OFF,
186 C_CAN_ERROR_WARNING,
187 C_CAN_ERROR_PASSIVE,
188};
189
190static struct can_bittiming_const c_can_bittiming_const = {
191 .name = KBUILD_MODNAME,
192 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
193 .tseg1_max = 16,
194 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
195 .tseg2_max = 8,
196 .sjw_max = 4,
197 .brp_min = 1,
198 .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/
199 .brp_inc = 1,
200};
201
202static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
203{
204 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
205 C_CAN_MSG_OBJ_TX_FIRST;
206}
207
208static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
209{
210 return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
211 C_CAN_MSG_OBJ_TX_FIRST;
212}
213
214static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
215{
216 u32 val = priv->read_reg(priv, reg);
217 val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
218 return val;
219}
220
221static void c_can_enable_all_interrupts(struct c_can_priv *priv,
222 int enable)
223{
224 unsigned int cntrl_save = priv->read_reg(priv,
225 &priv->regs->control);
226
227 if (enable)
228 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
229 else
230 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
231
232 priv->write_reg(priv, &priv->regs->control, cntrl_save);
233}
234
235static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
236{
237 int count = MIN_TIMEOUT_VALUE;
238
239 while (count && priv->read_reg(priv,
240 &priv->regs->ifregs[iface].com_req) &
241 IF_COMR_BUSY) {
242 count--;
243 udelay(1);
244 }
245
246 if (!count)
247 return 1;
248
249 return 0;
250}
251
252static inline void c_can_object_get(struct net_device *dev,
253 int iface, int objno, int mask)
254{
255 struct c_can_priv *priv = netdev_priv(dev);
256
257 /*
258 * As per specs, after writting the message object number in the
259 * IF command request register the transfer b/w interface
260 * register and message RAM must be complete in 6 CAN-CLK
261 * period.
262 */
263 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
264 IFX_WRITE_LOW_16BIT(mask));
265 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
266 IFX_WRITE_LOW_16BIT(objno));
267
268 if (c_can_msg_obj_is_busy(priv, iface))
269 netdev_err(dev, "timed out in object get\n");
270}
271
272static inline void c_can_object_put(struct net_device *dev,
273 int iface, int objno, int mask)
274{
275 struct c_can_priv *priv = netdev_priv(dev);
276
277 /*
278 * As per specs, after writting the message object number in the
279 * IF command request register the transfer b/w interface
280 * register and message RAM must be complete in 6 CAN-CLK
281 * period.
282 */
283 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
284 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
285 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
286 IFX_WRITE_LOW_16BIT(objno));
287
288 if (c_can_msg_obj_is_busy(priv, iface))
289 netdev_err(dev, "timed out in object put\n");
290}
291
292static void c_can_write_msg_object(struct net_device *dev,
293 int iface, struct can_frame *frame, int objno)
294{
295 int i;
296 u16 flags = 0;
297 unsigned int id;
298 struct c_can_priv *priv = netdev_priv(dev);
299
300 if (!(frame->can_id & CAN_RTR_FLAG))
301 flags |= IF_ARB_TRANSMIT;
302
303 if (frame->can_id & CAN_EFF_FLAG) {
304 id = frame->can_id & CAN_EFF_MASK;
305 flags |= IF_ARB_MSGXTD;
306 } else
307 id = ((frame->can_id & CAN_SFF_MASK) << 18);
308
309 flags |= IF_ARB_MSGVAL;
310
311 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
312 IFX_WRITE_LOW_16BIT(id));
313 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
314 IFX_WRITE_HIGH_16BIT(id));
315
316 for (i = 0; i < frame->can_dlc; i += 2) {
317 priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
318 frame->data[i] | (frame->data[i + 1] << 8));
319 }
320
321 /* enable interrupt for this message object */
322 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
323 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
324 frame->can_dlc);
325 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
326}
327
328static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
329 int iface, int ctrl_mask,
330 int obj)
331{
332 struct c_can_priv *priv = netdev_priv(dev);
333
334 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
335 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
336 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
337
338}
339
340static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
341 int iface,
342 int ctrl_mask)
343{
344 int i;
345 struct c_can_priv *priv = netdev_priv(dev);
346
347 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
348 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
349 ctrl_mask & ~(IF_MCONT_MSGLST |
350 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
351 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
352 }
353}
354
355static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
356 int iface, int ctrl_mask,
357 int obj)
358{
359 struct c_can_priv *priv = netdev_priv(dev);
360
361 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
362 ctrl_mask & ~(IF_MCONT_MSGLST |
363 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
364 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
365}
366
367static void c_can_handle_lost_msg_obj(struct net_device *dev,
368 int iface, int objno)
369{
370 struct c_can_priv *priv = netdev_priv(dev);
371 struct net_device_stats *stats = &dev->stats;
372 struct sk_buff *skb;
373 struct can_frame *frame;
374
375 netdev_err(dev, "msg lost in buffer %d\n", objno);
376
377 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
378
379 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
380 IF_MCONT_CLR_MSGLST);
381
382 c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
383
384 /* create an error msg */
385 skb = alloc_can_err_skb(dev, &frame);
386 if (unlikely(!skb))
387 return;
388
389 frame->can_id |= CAN_ERR_CRTL;
390 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
391 stats->rx_errors++;
392 stats->rx_over_errors++;
393
394 netif_receive_skb(skb);
395}
396
397static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
398{
399 u16 flags, data;
400 int i;
401 unsigned int val;
402 struct c_can_priv *priv = netdev_priv(dev);
403 struct net_device_stats *stats = &dev->stats;
404 struct sk_buff *skb;
405 struct can_frame *frame;
406
407 skb = alloc_can_skb(dev, &frame);
408 if (!skb) {
409 stats->rx_dropped++;
410 return -ENOMEM;
411 }
412
413 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
414
415 flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
416 val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
417 (flags << 16);
418
419 if (flags & IF_ARB_MSGXTD)
420 frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
421 else
422 frame->can_id = (val >> 18) & CAN_SFF_MASK;
423
424 if (flags & IF_ARB_TRANSMIT)
425 frame->can_id |= CAN_RTR_FLAG;
426 else {
427 for (i = 0; i < frame->can_dlc; i += 2) {
428 data = priv->read_reg(priv,
429 &priv->regs->ifregs[iface].data[i / 2]);
430 frame->data[i] = data;
431 frame->data[i + 1] = data >> 8;
432 }
433 }
434
435 netif_receive_skb(skb);
436
437 stats->rx_packets++;
438 stats->rx_bytes += frame->can_dlc;
439
440 return 0;
441}
442
443static void c_can_setup_receive_object(struct net_device *dev, int iface,
444 int objno, unsigned int mask,
445 unsigned int id, unsigned int mcont)
446{
447 struct c_can_priv *priv = netdev_priv(dev);
448
449 priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
450 IFX_WRITE_LOW_16BIT(mask));
451 priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
452 IFX_WRITE_HIGH_16BIT(mask));
453
454 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
455 IFX_WRITE_LOW_16BIT(id));
456 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
457 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
458
459 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
460 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
461
462 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
463 c_can_read_reg32(priv, &priv->regs->msgval1));
464}
465
466static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
467{
468 struct c_can_priv *priv = netdev_priv(dev);
469
470 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
471 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
472 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
473
474 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
475
476 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
477 c_can_read_reg32(priv, &priv->regs->msgval1));
478}
479
480static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
481{
482 int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
483
484 /*
485 * as transmission request register's bit n-1 corresponds to
486 * message object n, we need to handle the same properly.
487 */
488 if (val & (1 << (objno - 1)))
489 return 1;
490
491 return 0;
492}
493
494static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
495 struct net_device *dev)
496{
497 u32 msg_obj_no;
498 struct c_can_priv *priv = netdev_priv(dev);
499 struct can_frame *frame = (struct can_frame *)skb->data;
500
501 if (can_dropped_invalid_skb(dev, skb))
502 return NETDEV_TX_OK;
503
504 msg_obj_no = get_tx_next_msg_obj(priv);
505
506 /* prepare message object for transmission */
507 c_can_write_msg_object(dev, 0, frame, msg_obj_no);
508 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
509
510 /*
511 * we have to stop the queue in case of a wrap around or
512 * if the next TX message object is still in use
513 */
514 priv->tx_next++;
515 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
516 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
517 netif_stop_queue(dev);
518
519 return NETDEV_TX_OK;
520}
521
522static int c_can_set_bittiming(struct net_device *dev)
523{
524 unsigned int reg_btr, reg_brpe, ctrl_save;
525 u8 brp, brpe, sjw, tseg1, tseg2;
526 u32 ten_bit_brp;
527 struct c_can_priv *priv = netdev_priv(dev);
528 const struct can_bittiming *bt = &priv->can.bittiming;
529
530 /* c_can provides a 6-bit brp and 4-bit brpe fields */
531 ten_bit_brp = bt->brp - 1;
532 brp = ten_bit_brp & BTR_BRP_MASK;
533 brpe = ten_bit_brp >> 6;
534
535 sjw = bt->sjw - 1;
536 tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
537 tseg2 = bt->phase_seg2 - 1;
538 reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
539 (tseg2 << BTR_TSEG2_SHIFT);
540 reg_brpe = brpe & BRP_EXT_BRPE_MASK;
541
542 netdev_info(dev,
543 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
544
545 ctrl_save = priv->read_reg(priv, &priv->regs->control);
546 priv->write_reg(priv, &priv->regs->control,
547 ctrl_save | CONTROL_CCE | CONTROL_INIT);
548 priv->write_reg(priv, &priv->regs->btr, reg_btr);
549 priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
550 priv->write_reg(priv, &priv->regs->control, ctrl_save);
551
552 return 0;
553}
554
555/*
556 * Configure C_CAN message objects for Tx and Rx purposes:
557 * C_CAN provides a total of 32 message objects that can be configured
558 * either for Tx or Rx purposes. Here the first 16 message objects are used as
559 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
560 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
561 * See user guide document for further details on configuring message
562 * objects.
563 */
564static void c_can_configure_msg_objects(struct net_device *dev)
565{
566 int i;
567
568 /* first invalidate all message objects */
569 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
570 c_can_inval_msg_object(dev, 0, i);
571
572 /* setup receive message objects */
573 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
574 c_can_setup_receive_object(dev, 0, i, 0, 0,
575 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
576
577 c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
578 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
579}
580
581/*
582 * Configure C_CAN chip:
583 * - enable/disable auto-retransmission
584 * - set operating mode
585 * - configure message objects
586 */
587static void c_can_chip_config(struct net_device *dev)
588{
589 struct c_can_priv *priv = netdev_priv(dev);
590
591 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
592 /* disable automatic retransmission */
593 priv->write_reg(priv, &priv->regs->control,
594 CONTROL_DISABLE_AR);
595 else
596 /* enable automatic retransmission */
597 priv->write_reg(priv, &priv->regs->control,
598 CONTROL_ENABLE_AR);
599
600 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
601 CAN_CTRLMODE_LOOPBACK)) {
602 /* loopback + silent mode : useful for hot self-test */
603 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
604 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
605 priv->write_reg(priv, &priv->regs->test,
606 TEST_LBACK | TEST_SILENT);
607 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
608 /* loopback mode : useful for self-test function */
609 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
610 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
611 priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
612 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
613 /* silent mode : bus-monitoring mode */
614 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
615 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
616 priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
617 } else
618 /* normal mode*/
619 priv->write_reg(priv, &priv->regs->control,
620 CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
621
622 /* configure message objects */
623 c_can_configure_msg_objects(dev);
624
625 /* set a `lec` value so that we can check for updates later */
626 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
627
628 /* set bittiming params */
629 c_can_set_bittiming(dev);
630}
631
632static void c_can_start(struct net_device *dev)
633{
634 struct c_can_priv *priv = netdev_priv(dev);
635
636 /* enable status change, error and module interrupts */
637 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
638
639 /* basic c_can configuration */
640 c_can_chip_config(dev);
641
642 priv->can.state = CAN_STATE_ERROR_ACTIVE;
643
644 /* reset tx helper pointers */
645 priv->tx_next = priv->tx_echo = 0;
646}
647
648static void c_can_stop(struct net_device *dev)
649{
650 struct c_can_priv *priv = netdev_priv(dev);
651
652 /* disable all interrupts */
653 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
654
655 /* set the state as STOPPED */
656 priv->can.state = CAN_STATE_STOPPED;
657}
658
659static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
660{
661 switch (mode) {
662 case CAN_MODE_START:
663 c_can_start(dev);
664 netif_wake_queue(dev);
665 break;
666 default:
667 return -EOPNOTSUPP;
668 }
669
670 return 0;
671}
672
673static int c_can_get_berr_counter(const struct net_device *dev,
674 struct can_berr_counter *bec)
675{
676 unsigned int reg_err_counter;
677 struct c_can_priv *priv = netdev_priv(dev);
678
679 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
680 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
681 ERR_CNT_REC_SHIFT;
682 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
683
684 return 0;
685}
686
687/*
688 * theory of operation:
689 *
690 * priv->tx_echo holds the number of the oldest can_frame put for
691 * transmission into the hardware, but not yet ACKed by the CAN tx
692 * complete IRQ.
693 *
694 * We iterate from priv->tx_echo to priv->tx_next and check if the
695 * packet has been transmitted, echo it back to the CAN framework.
696 * If we discover a not yet transmitted package, stop looking for more.
697 */
698static void c_can_do_tx(struct net_device *dev)
699{
700 u32 val;
701 u32 msg_obj_no;
702 struct c_can_priv *priv = netdev_priv(dev);
703 struct net_device_stats *stats = &dev->stats;
704
705 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
706 msg_obj_no = get_tx_echo_msg_obj(priv);
707 c_can_inval_msg_object(dev, 0, msg_obj_no);
708 val = c_can_read_reg32(priv, &priv->regs->txrqst1);
709 if (!(val & (1 << msg_obj_no))) {
710 can_get_echo_skb(dev,
711 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
712 stats->tx_bytes += priv->read_reg(priv,
713 &priv->regs->ifregs[0].msg_cntrl)
714 & IF_MCONT_DLC_MASK;
715 stats->tx_packets++;
716 }
717 }
718
719 /* restart queue if wrap-up or if queue stalled on last pkt */
720 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
721 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
722 netif_wake_queue(dev);
723}
724
725/*
726 * theory of operation:
727 *
728 * c_can core saves a received CAN message into the first free message
729 * object it finds free (starting with the lowest). Bits NEWDAT and
730 * INTPND are set for this message object indicating that a new message
731 * has arrived. To work-around this issue, we keep two groups of message
732 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
733 *
734 * To ensure in-order frame reception we use the following
735 * approach while re-activating a message object to receive further
736 * frames:
737 * - if the current message object number is lower than
738 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
739 * the INTPND bit.
740 * - if the current message object number is equal to
741 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
742 * receive message objects.
743 * - if the current message object number is greater than
744 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
745 * only this message object.
746 */
747static int c_can_do_rx_poll(struct net_device *dev, int quota)
748{
749 u32 num_rx_pkts = 0;
750 unsigned int msg_obj, msg_ctrl_save;
751 struct c_can_priv *priv = netdev_priv(dev);
752 u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
753
754 for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
755 msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
756 val = c_can_read_reg32(priv, &priv->regs->intpnd1),
757 msg_obj++) {
758 /*
759 * as interrupt pending register's bit n-1 corresponds to
760 * message object n, we need to handle the same properly.
761 */
762 if (val & (1 << (msg_obj - 1))) {
763 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
764 ~IF_COMM_TXRQST);
765 msg_ctrl_save = priv->read_reg(priv,
766 &priv->regs->ifregs[0].msg_cntrl);
767
768 if (msg_ctrl_save & IF_MCONT_EOB)
769 return num_rx_pkts;
770
771 if (msg_ctrl_save & IF_MCONT_MSGLST) {
772 c_can_handle_lost_msg_obj(dev, 0, msg_obj);
773 num_rx_pkts++;
774 quota--;
775 continue;
776 }
777
778 if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
779 continue;
780
781 /* read the data from the message object */
782 c_can_read_msg_object(dev, 0, msg_ctrl_save);
783
784 if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
785 c_can_mark_rx_msg_obj(dev, 0,
786 msg_ctrl_save, msg_obj);
787 else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
788 /* activate this msg obj */
789 c_can_activate_rx_msg_obj(dev, 0,
790 msg_ctrl_save, msg_obj);
791 else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
792 /* activate all lower message objects */
793 c_can_activate_all_lower_rx_msg_obj(dev,
794 0, msg_ctrl_save);
795
796 num_rx_pkts++;
797 quota--;
798 }
799 }
800
801 return num_rx_pkts;
802}
803
804static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
805{
806 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
807 (priv->current_status & LEC_UNUSED);
808}
809
810static int c_can_handle_state_change(struct net_device *dev,
811 enum c_can_bus_error_types error_type)
812{
813 unsigned int reg_err_counter;
814 unsigned int rx_err_passive;
815 struct c_can_priv *priv = netdev_priv(dev);
816 struct net_device_stats *stats = &dev->stats;
817 struct can_frame *cf;
818 struct sk_buff *skb;
819 struct can_berr_counter bec;
820
821 /* propogate the error condition to the CAN stack */
822 skb = alloc_can_err_skb(dev, &cf);
823 if (unlikely(!skb))
824 return 0;
825
826 c_can_get_berr_counter(dev, &bec);
827 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
828 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
829 ERR_CNT_RP_SHIFT;
830
831 switch (error_type) {
832 case C_CAN_ERROR_WARNING:
833 /* error warning state */
834 priv->can.can_stats.error_warning++;
835 priv->can.state = CAN_STATE_ERROR_WARNING;
836 cf->can_id |= CAN_ERR_CRTL;
837 cf->data[1] = (bec.txerr > bec.rxerr) ?
838 CAN_ERR_CRTL_TX_WARNING :
839 CAN_ERR_CRTL_RX_WARNING;
840 cf->data[6] = bec.txerr;
841 cf->data[7] = bec.rxerr;
842
843 break;
844 case C_CAN_ERROR_PASSIVE:
845 /* error passive state */
846 priv->can.can_stats.error_passive++;
847 priv->can.state = CAN_STATE_ERROR_PASSIVE;
848 cf->can_id |= CAN_ERR_CRTL;
849 if (rx_err_passive)
850 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
851 if (bec.txerr > 127)
852 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
853
854 cf->data[6] = bec.txerr;
855 cf->data[7] = bec.rxerr;
856 break;
857 case C_CAN_BUS_OFF:
858 /* bus-off state */
859 priv->can.state = CAN_STATE_BUS_OFF;
860 cf->can_id |= CAN_ERR_BUSOFF;
861 /*
862 * disable all interrupts in bus-off mode to ensure that
863 * the CPU is not hogged down
864 */
865 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
866 can_bus_off(dev);
867 break;
868 default:
869 break;
870 }
871
872 netif_receive_skb(skb);
873 stats->rx_packets++;
874 stats->rx_bytes += cf->can_dlc;
875
876 return 1;
877}
878
879static int c_can_handle_bus_err(struct net_device *dev,
880 enum c_can_lec_type lec_type)
881{
882 struct c_can_priv *priv = netdev_priv(dev);
883 struct net_device_stats *stats = &dev->stats;
884 struct can_frame *cf;
885 struct sk_buff *skb;
886
887 /*
888 * early exit if no lec update or no error.
889 * no lec update means that no CAN bus event has been detected
890 * since CPU wrote 0x7 value to status reg.
891 */
892 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
893 return 0;
894
895 /* propogate the error condition to the CAN stack */
896 skb = alloc_can_err_skb(dev, &cf);
897 if (unlikely(!skb))
898 return 0;
899
900 /*
901 * check for 'last error code' which tells us the
902 * type of the last error to occur on the CAN bus
903 */
904
905 /* common for all type of bus errors */
906 priv->can.can_stats.bus_error++;
907 stats->rx_errors++;
908 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
909 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
910
911 switch (lec_type) {
912 case LEC_STUFF_ERROR:
913 netdev_dbg(dev, "stuff error\n");
914 cf->data[2] |= CAN_ERR_PROT_STUFF;
915 break;
916 case LEC_FORM_ERROR:
917 netdev_dbg(dev, "form error\n");
918 cf->data[2] |= CAN_ERR_PROT_FORM;
919 break;
920 case LEC_ACK_ERROR:
921 netdev_dbg(dev, "ack error\n");
922 cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
923 CAN_ERR_PROT_LOC_ACK_DEL);
924 break;
925 case LEC_BIT1_ERROR:
926 netdev_dbg(dev, "bit1 error\n");
927 cf->data[2] |= CAN_ERR_PROT_BIT1;
928 break;
929 case LEC_BIT0_ERROR:
930 netdev_dbg(dev, "bit0 error\n");
931 cf->data[2] |= CAN_ERR_PROT_BIT0;
932 break;
933 case LEC_CRC_ERROR:
934 netdev_dbg(dev, "CRC error\n");
935 cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
936 CAN_ERR_PROT_LOC_CRC_DEL);
937 break;
938 default:
939 break;
940 }
941
942 /* set a `lec` value so that we can check for updates later */
943 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
944
945 netif_receive_skb(skb);
946 stats->rx_packets++;
947 stats->rx_bytes += cf->can_dlc;
948
949 return 1;
950}
951
952static int c_can_poll(struct napi_struct *napi, int quota)
953{
954 u16 irqstatus;
955 int lec_type = 0;
956 int work_done = 0;
957 struct net_device *dev = napi->dev;
958 struct c_can_priv *priv = netdev_priv(dev);
959
960 irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
961 if (!irqstatus)
962 goto end;
963
964 /* status events have the highest priority */
965 if (irqstatus == STATUS_INTERRUPT) {
966 priv->current_status = priv->read_reg(priv,
967 &priv->regs->status);
968
969 /* handle Tx/Rx events */
970 if (priv->current_status & STATUS_TXOK)
971 priv->write_reg(priv, &priv->regs->status,
972 priv->current_status & ~STATUS_TXOK);
973
974 if (priv->current_status & STATUS_RXOK)
975 priv->write_reg(priv, &priv->regs->status,
976 priv->current_status & ~STATUS_RXOK);
977
978 /* handle state changes */
979 if ((priv->current_status & STATUS_EWARN) &&
980 (!(priv->last_status & STATUS_EWARN))) {
981 netdev_dbg(dev, "entered error warning state\n");
982 work_done += c_can_handle_state_change(dev,
983 C_CAN_ERROR_WARNING);
984 }
985 if ((priv->current_status & STATUS_EPASS) &&
986 (!(priv->last_status & STATUS_EPASS))) {
987 netdev_dbg(dev, "entered error passive state\n");
988 work_done += c_can_handle_state_change(dev,
989 C_CAN_ERROR_PASSIVE);
990 }
991 if ((priv->current_status & STATUS_BOFF) &&
992 (!(priv->last_status & STATUS_BOFF))) {
993 netdev_dbg(dev, "entered bus off state\n");
994 work_done += c_can_handle_state_change(dev,
995 C_CAN_BUS_OFF);
996 }
997
998 /* handle bus recovery events */
999 if ((!(priv->current_status & STATUS_BOFF)) &&
1000 (priv->last_status & STATUS_BOFF)) {
1001 netdev_dbg(dev, "left bus off state\n");
1002 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1003 }
1004 if ((!(priv->current_status & STATUS_EPASS)) &&
1005 (priv->last_status & STATUS_EPASS)) {
1006 netdev_dbg(dev, "left error passive state\n");
1007 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1008 }
1009
1010 priv->last_status = priv->current_status;
1011
1012 /* handle lec errors on the bus */
1013 lec_type = c_can_has_and_handle_berr(priv);
1014 if (lec_type)
1015 work_done += c_can_handle_bus_err(dev, lec_type);
1016 } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
1017 (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
1018 /* handle events corresponding to receive message objects */
1019 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1020 } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
1021 (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
1022 /* handle events corresponding to transmit message objects */
1023 c_can_do_tx(dev);
1024 }
1025
1026end:
1027 if (work_done < quota) {
1028 napi_complete(napi);
1029 /* enable all IRQs */
1030 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
1031 }
1032
1033 return work_done;
1034}
1035
1036static irqreturn_t c_can_isr(int irq, void *dev_id)
1037{
1038 u16 irqstatus;
1039 struct net_device *dev = (struct net_device *)dev_id;
1040 struct c_can_priv *priv = netdev_priv(dev);
1041
1042 irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
1043 if (!irqstatus)
1044 return IRQ_NONE;
1045
1046 /* disable all interrupts and schedule the NAPI */
1047 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1048 napi_schedule(&priv->napi);
1049
1050 return IRQ_HANDLED;
1051}
1052
1053static int c_can_open(struct net_device *dev)
1054{
1055 int err;
1056 struct c_can_priv *priv = netdev_priv(dev);
1057
1058 /* open the can device */
1059 err = open_candev(dev);
1060 if (err) {
1061 netdev_err(dev, "failed to open can device\n");
1062 return err;
1063 }
1064
1065 /* register interrupt handler */
1066 err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
1067 dev);
1068 if (err < 0) {
1069 netdev_err(dev, "failed to request interrupt\n");
1070 goto exit_irq_fail;
1071 }
1072
1073 /* start the c_can controller */
1074 c_can_start(dev);
1075
1076 napi_enable(&priv->napi);
1077 netif_start_queue(dev);
1078
1079 return 0;
1080
1081exit_irq_fail:
1082 close_candev(dev);
1083 return err;
1084}
1085
/*
 * ndo_stop callback: quiesce the TX queue and NAPI, stop the controller,
 * then release the IRQ and the CAN device framework state.  Exact inverse
 * order of c_can_open().
 */
static int c_can_close(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	c_can_stop(dev);
	free_irq(dev->irq, dev);
	close_candev(dev);

	return 0;
}
1098
1099struct net_device *alloc_c_can_dev(void)
1100{
1101 struct net_device *dev;
1102 struct c_can_priv *priv;
1103
1104 dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
1105 if (!dev)
1106 return NULL;
1107
1108 priv = netdev_priv(dev);
1109 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1110
1111 priv->dev = dev;
1112 priv->can.bittiming_const = &c_can_bittiming_const;
1113 priv->can.do_set_mode = c_can_set_mode;
1114 priv->can.do_get_berr_counter = c_can_get_berr_counter;
1115 priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
1116 CAN_CTRLMODE_LOOPBACK |
1117 CAN_CTRLMODE_LISTENONLY |
1118 CAN_CTRLMODE_BERR_REPORTING;
1119
1120 return dev;
1121}
1122EXPORT_SYMBOL_GPL(alloc_c_can_dev);
1123
/* Free a netdevice obtained from alloc_c_can_dev(). */
void free_c_can_dev(struct net_device *dev)
{
	free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);
1129
/* net_device callbacks, installed by register_c_can_dev() */
static const struct net_device_ops c_can_netdev_ops = {
	.ndo_open = c_can_open,
	.ndo_stop = c_can_close,
	.ndo_start_xmit = c_can_start_xmit,
};
1135
1136int register_c_can_dev(struct net_device *dev)
1137{
1138 dev->flags |= IFF_ECHO; /* we support local echo */
1139 dev->netdev_ops = &c_can_netdev_ops;
1140
1141 return register_candev(dev);
1142}
1143EXPORT_SYMBOL_GPL(register_c_can_dev);
1144
/*
 * Mask all controller interrupts, then unregister the CAN netdevice.
 */
void unregister_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/* disable all interrupts */
	c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);

	unregister_candev(dev);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);
1155
1156MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
1157MODULE_LICENSE("GPL v2");
1158MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644
index 000000000000..9b7fbef3d09a
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.h
@@ -0,0 +1,86 @@
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
13 * Bosch C_CAN user manual can be obtained from:
14 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
15 * users_manual_c_can.pdf
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#ifndef C_CAN_H
23#define C_CAN_H
24
/*
 * c_can IF registers: one message-interface (IF1/IF2) register set.
 * Layout mirrors the hardware map; _reserved pads keep offsets aligned.
 * NOTE(review): field roles follow the register names — confirm against
 * the Bosch C_CAN user manual.
 */
struct c_can_if_regs {
	u16 com_req;	/* command request */
	u16 com_mask;	/* command mask */
	u16 mask1;
	u16 mask2;
	u16 arb1;
	u16 arb2;
	u16 msg_cntrl;
	u16 data[4];	/* 8 message data bytes as four 16-bit words */
	u16 _reserved[13];
};
37
/*
 * c_can hardware registers, as seen through the priv->read_reg /
 * priv->write_reg accessors (which absorb the 16- vs 32-bit bus
 * alignment differences).  _reservedN pads keep the struct offsets in
 * line with the controller's register map.
 */
struct c_can_regs {
	u16 control;
	u16 status;	/* cached in priv->current_status by c_can_poll() */
	u16 err_cnt;	/* RX counter/RP flag extracted via ERR_CNT_RP_* */
	u16 btr;
	u16 interrupt;	/* interrupt source; 0 means nothing pending */
	u16 test;
	u16 brp_ext;
	u16 _reserved1;
	struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
	u16 _reserved2[8];
	u16 txrqst1;
	u16 txrqst2;
	u16 _reserved3[6];
	u16 newdat1;
	u16 newdat2;
	u16 _reserved4[6];
	u16 intpnd1;
	u16 intpnd2;
	u16 _reserved5[6];
	u16 msgval1;
	u16 msgval2;
	u16 _reserved6[6];
};
63
/* c_can private data structure */
struct c_can_priv {
	struct can_priv can;	/* must be the first member */
	struct napi_struct napi;
	struct net_device *dev;
	int tx_object;		/* NOTE(review): managed by TX path outside this view */
	int current_status;	/* status register value read in the current poll */
	int last_status;	/* status value from the previous status event */
	/* bus-alignment-specific accessors (see c_can_platform.c) */
	u16 (*read_reg) (struct c_can_priv *priv, void *reg);
	void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
	struct c_can_regs __iomem *regs;
	unsigned long irq_flags; /* for request_irq() */
	unsigned int tx_next;	/* NOTE(review): TX ring indices — managed outside this view */
	unsigned int tx_echo;
	void *priv;		/* for board-specific data (platform code stores struct clk *) */
};
80
/* Allocate a CAN netdevice with c_can private data attached. */
struct net_device *alloc_c_can_dev(void);
/* Free a netdevice obtained from alloc_c_can_dev(). */
void free_c_can_dev(struct net_device *dev);
/* Install the netdev ops and register with the CAN core. */
int register_c_can_dev(struct net_device *dev);
/* Mask controller interrupts and unregister the netdevice. */
void unregister_c_can_dev(struct net_device *dev);
85
86#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644
index 000000000000..e629b961ae2d
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -0,0 +1,215 @@
1/*
2 * Platform CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
13 * Bosch C_CAN user manual can be obtained from:
14 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
15 * users_manual_c_can.pdf
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#include <linux/kernel.h>
23#include <linux/version.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/delay.h>
32#include <linux/io.h>
33#include <linux/platform_device.h>
34#include <linux/clk.h>
35
36#include <linux/can/dev.h>
37
38#include "c_can.h"
39
40/*
41 * 16-bit c_can registers can be arranged differently in the memory
42 * architecture of different implementations. For example: 16-bit
43 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
44 * Handle the same by providing a common read/write interface.
45 */
/* 16-bit-aligned bus: the struct offset equals the hardware offset */
static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
						void *reg)
{
	return readw(reg);
}
51
/* 16-bit-aligned bus: the struct offset equals the hardware offset */
static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
						void *reg, u16 val)
{
	writew(val, reg);
}
57
58static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
59 void *reg)
60{
61 return readw(reg + (long)reg - (long)priv->regs);
62}
63
64static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
65 void *reg, u16 val)
66{
67 writew(val, reg + (long)reg - (long)priv->regs);
68}
69
70static int __devinit c_can_plat_probe(struct platform_device *pdev)
71{
72 int ret;
73 void __iomem *addr;
74 struct net_device *dev;
75 struct c_can_priv *priv;
76 struct resource *mem, *irq;
77#ifdef CONFIG_HAVE_CLK
78 struct clk *clk;
79
80 /* get the appropriate clk */
81 clk = clk_get(&pdev->dev, NULL);
82 if (IS_ERR(clk)) {
83 dev_err(&pdev->dev, "no clock defined\n");
84 ret = -ENODEV;
85 goto exit;
86 }
87#endif
88
89 /* get the platform data */
90 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
91 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
92 if (!mem || (irq <= 0)) {
93 ret = -ENODEV;
94 goto exit_free_clk;
95 }
96
97 if (!request_mem_region(mem->start, resource_size(mem),
98 KBUILD_MODNAME)) {
99 dev_err(&pdev->dev, "resource unavailable\n");
100 ret = -ENODEV;
101 goto exit_free_clk;
102 }
103
104 addr = ioremap(mem->start, resource_size(mem));
105 if (!addr) {
106 dev_err(&pdev->dev, "failed to map can port\n");
107 ret = -ENOMEM;
108 goto exit_release_mem;
109 }
110
111 /* allocate the c_can device */
112 dev = alloc_c_can_dev();
113 if (!dev) {
114 ret = -ENOMEM;
115 goto exit_iounmap;
116 }
117
118 priv = netdev_priv(dev);
119
120 dev->irq = irq->start;
121 priv->regs = addr;
122#ifdef CONFIG_HAVE_CLK
123 priv->can.clock.freq = clk_get_rate(clk);
124 priv->priv = clk;
125#endif
126
127 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
128 case IORESOURCE_MEM_32BIT:
129 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
130 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
131 break;
132 case IORESOURCE_MEM_16BIT:
133 default:
134 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
135 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
136 break;
137 }
138
139 platform_set_drvdata(pdev, dev);
140 SET_NETDEV_DEV(dev, &pdev->dev);
141
142 ret = register_c_can_dev(dev);
143 if (ret) {
144 dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
145 KBUILD_MODNAME, ret);
146 goto exit_free_device;
147 }
148
149 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
150 KBUILD_MODNAME, priv->regs, dev->irq);
151 return 0;
152
153exit_free_device:
154 platform_set_drvdata(pdev, NULL);
155 free_c_can_dev(dev);
156exit_iounmap:
157 iounmap(addr);
158exit_release_mem:
159 release_mem_region(mem->start, resource_size(mem));
160exit_free_clk:
161#ifdef CONFIG_HAVE_CLK
162 clk_put(clk);
163exit:
164#endif
165 dev_err(&pdev->dev, "probe failed\n");
166
167 return ret;
168}
169
170static int __devexit c_can_plat_remove(struct platform_device *pdev)
171{
172 struct net_device *dev = platform_get_drvdata(pdev);
173 struct c_can_priv *priv = netdev_priv(dev);
174 struct resource *mem;
175
176 unregister_c_can_dev(dev);
177 platform_set_drvdata(pdev, NULL);
178
179 free_c_can_dev(dev);
180 iounmap(priv->regs);
181
182 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
183 release_mem_region(mem->start, resource_size(mem));
184
185#ifdef CONFIG_HAVE_CLK
186 clk_put(priv->priv);
187#endif
188
189 return 0;
190}
191
/* platform glue; the driver name doubles as the platform device match */
static struct platform_driver c_can_plat_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = c_can_plat_probe,
	.remove = __devexit_p(c_can_plat_remove),
};
200
/* Module entry point: register the platform driver. */
static int __init c_can_plat_init(void)
{
	return platform_driver_register(&c_can_plat_driver);
}
module_init(c_can_plat_init);
206
/* Module exit point: unregister the platform driver. */
static void __exit c_can_plat_exit(void)
{
	platform_driver_unregister(&c_can_plat_driver);
}
module_exit(c_can_plat_exit);
212
213MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
214MODULE_LICENSE("GPL v2");
215MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 05a52754f486..dc53c831ea95 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -659,7 +659,7 @@ failed:
659static void unlink_all_urbs(struct esd_usb2 *dev) 659static void unlink_all_urbs(struct esd_usb2 *dev)
660{ 660{
661 struct esd_usb2_net_priv *priv; 661 struct esd_usb2_net_priv *priv;
662 int i; 662 int i, j;
663 663
664 usb_kill_anchored_urbs(&dev->rx_submitted); 664 usb_kill_anchored_urbs(&dev->rx_submitted);
665 for (i = 0; i < dev->net_count; i++) { 665 for (i = 0; i < dev->net_count; i++) {
@@ -668,8 +668,8 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
668 usb_kill_anchored_urbs(&priv->tx_submitted); 668 usb_kill_anchored_urbs(&priv->tx_submitted);
669 atomic_set(&priv->active_tx_jobs, 0); 669 atomic_set(&priv->active_tx_jobs, 0);
670 670
671 for (i = 0; i < MAX_TX_URBS; i++) 671 for (j = 0; j < MAX_TX_URBS; j++)
672 priv->tx_contexts[i].echo_index = MAX_TX_URBS; 672 priv->tx_contexts[j].echo_index = MAX_TX_URBS;
673 } 673 }
674 } 674 }
675} 675}
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 302be4aa69d6..8cca60e43444 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
65static DEFINE_RWLOCK(cnic_dev_lock); 65static DEFINE_RWLOCK(cnic_dev_lock);
66static DEFINE_MUTEX(cnic_lock); 66static DEFINE_MUTEX(cnic_lock);
67 67
68static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; 68static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
69
70/* helper function, assuming cnic_lock is held */
71static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
72{
73 return rcu_dereference_protected(cnic_ulp_tbl[type],
74 lockdep_is_held(&cnic_lock));
75}
69 76
70static int cnic_service_bnx2(void *, void *); 77static int cnic_service_bnx2(void *, void *);
71static int cnic_service_bnx2x(void *, void *); 78static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
435 return -EINVAL; 442 return -EINVAL;
436 } 443 }
437 mutex_lock(&cnic_lock); 444 mutex_lock(&cnic_lock);
438 if (cnic_ulp_tbl[ulp_type]) { 445 if (cnic_ulp_tbl_prot(ulp_type)) {
439 pr_err("%s: Type %d has already been registered\n", 446 pr_err("%s: Type %d has already been registered\n",
440 __func__, ulp_type); 447 __func__, ulp_type);
441 mutex_unlock(&cnic_lock); 448 mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
478 return -EINVAL; 485 return -EINVAL;
479 } 486 }
480 mutex_lock(&cnic_lock); 487 mutex_lock(&cnic_lock);
481 ulp_ops = cnic_ulp_tbl[ulp_type]; 488 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
482 if (!ulp_ops) { 489 if (!ulp_ops) {
483 pr_err("%s: Type %d has not been registered\n", 490 pr_err("%s: Type %d has not been registered\n",
484 __func__, ulp_type); 491 __func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
529 return -EINVAL; 536 return -EINVAL;
530 } 537 }
531 mutex_lock(&cnic_lock); 538 mutex_lock(&cnic_lock);
532 if (cnic_ulp_tbl[ulp_type] == NULL) { 539 if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
533 pr_err("%s: Driver with type %d has not been registered\n", 540 pr_err("%s: Driver with type %d has not been registered\n",
534 __func__, ulp_type); 541 __func__, ulp_type);
535 mutex_unlock(&cnic_lock); 542 mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
544 551
545 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); 552 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
546 cp->ulp_handle[ulp_type] = ulp_ctx; 553 cp->ulp_handle[ulp_type] = ulp_ctx;
547 ulp_ops = cnic_ulp_tbl[ulp_type]; 554 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
548 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); 555 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
549 cnic_hold(dev); 556 cnic_hold(dev);
550 557
@@ -2970,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
2970 struct cnic_ulp_ops *ulp_ops; 2977 struct cnic_ulp_ops *ulp_ops;
2971 2978
2972 mutex_lock(&cnic_lock); 2979 mutex_lock(&cnic_lock);
2973 ulp_ops = cp->ulp_ops[if_type]; 2980 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2981 lockdep_is_held(&cnic_lock));
2974 if (!ulp_ops) { 2982 if (!ulp_ops) {
2975 mutex_unlock(&cnic_lock); 2983 mutex_unlock(&cnic_lock);
2976 continue; 2984 continue;
@@ -2994,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
2994 struct cnic_ulp_ops *ulp_ops; 3002 struct cnic_ulp_ops *ulp_ops;
2995 3003
2996 mutex_lock(&cnic_lock); 3004 mutex_lock(&cnic_lock);
2997 ulp_ops = cp->ulp_ops[if_type]; 3005 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3006 lockdep_is_held(&cnic_lock));
2998 if (!ulp_ops || !ulp_ops->cnic_start) { 3007 if (!ulp_ops || !ulp_ops->cnic_start) {
2999 mutex_unlock(&cnic_lock); 3008 mutex_unlock(&cnic_lock);
3000 continue; 3009 continue;
@@ -3058,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
3058 struct cnic_ulp_ops *ulp_ops; 3067 struct cnic_ulp_ops *ulp_ops;
3059 3068
3060 mutex_lock(&cnic_lock); 3069 mutex_lock(&cnic_lock);
3061 ulp_ops = cnic_ulp_tbl[i]; 3070 ulp_ops = cnic_ulp_tbl_prot(i);
3062 if (!ulp_ops || !ulp_ops->cnic_init) { 3071 if (!ulp_ops || !ulp_ops->cnic_init) {
3063 mutex_unlock(&cnic_lock); 3072 mutex_unlock(&cnic_lock);
3064 continue; 3073 continue;
@@ -3082,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
3082 struct cnic_ulp_ops *ulp_ops; 3091 struct cnic_ulp_ops *ulp_ops;
3083 3092
3084 mutex_lock(&cnic_lock); 3093 mutex_lock(&cnic_lock);
3085 ulp_ops = cnic_ulp_tbl[i]; 3094 ulp_ops = cnic_ulp_tbl_prot(i);
3086 if (!ulp_ops || !ulp_ops->cnic_exit) { 3095 if (!ulp_ops || !ulp_ops->cnic_exit) {
3087 mutex_unlock(&cnic_lock); 3096 mutex_unlock(&cnic_lock);
3088 continue; 3097 continue;
@@ -3398,17 +3407,14 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3398 struct dst_entry **dst) 3407 struct dst_entry **dst)
3399{ 3408{
3400#if defined(CONFIG_INET) 3409#if defined(CONFIG_INET)
3401 struct flowi fl;
3402 int err;
3403 struct rtable *rt; 3410 struct rtable *rt;
3404 3411
3405 memset(&fl, 0, sizeof(fl)); 3412 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3406 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; 3413 if (!IS_ERR(rt)) {
3407
3408 err = ip_route_output_key(&init_net, &rt, &fl);
3409 if (!err)
3410 *dst = &rt->dst; 3414 *dst = &rt->dst;
3411 return err; 3415 return 0;
3416 }
3417 return PTR_ERR(rt);
3412#else 3418#else
3413 return -ENETUNREACH; 3419 return -ENETUNREACH;
3414#endif 3420#endif
@@ -3418,14 +3424,14 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3418 struct dst_entry **dst) 3424 struct dst_entry **dst)
3419{ 3425{
3420#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 3426#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3421 struct flowi fl; 3427 struct flowi6 fl6;
3422 3428
3423 memset(&fl, 0, sizeof(fl)); 3429 memset(&fl6, 0, sizeof(fl6));
3424 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr); 3430 ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
3425 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL) 3431 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3426 fl.oif = dst_addr->sin6_scope_id; 3432 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3427 3433
3428 *dst = ip6_route_output(&init_net, NULL, &fl); 3434 *dst = ip6_route_output(&init_net, NULL, &fl6);
3429 if (*dst) 3435 if (*dst)
3430 return 0; 3436 return 0;
3431#endif 3437#endif
@@ -4187,6 +4193,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4187 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4193 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4188} 4194}
4189 4195
4196static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
4197{
4198 u32 max_conn;
4199
4200 max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
4201 dev->max_iscsi_conn = max_conn;
4202}
4203
4190static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4204static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4191{ 4205{
4192 struct cnic_local *cp = dev->cnic_priv; 4206 struct cnic_local *cp = dev->cnic_priv;
@@ -4511,6 +4525,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4511 return err; 4525 return err;
4512 } 4526 }
4513 4527
4528 cnic_get_bnx2_iscsi_info(dev);
4529
4514 return 0; 4530 return 0;
4515} 4531}
4516 4532
@@ -4722,129 +4738,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4722 cp->rx_cons = *cp->rx_cons_ptr; 4738 cp->rx_cons = *cp->rx_cons_ptr;
4723} 4739}
4724 4740
4725static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
4726 u32 lower_addr)
4727{
4728 u32 val;
4729 u8 mac[6];
4730
4731 val = CNIC_RD(dev, upper_addr);
4732
4733 mac[0] = (u8) (val >> 8);
4734 mac[1] = (u8) val;
4735
4736 val = CNIC_RD(dev, lower_addr);
4737
4738 mac[2] = (u8) (val >> 24);
4739 mac[3] = (u8) (val >> 16);
4740 mac[4] = (u8) (val >> 8);
4741 mac[5] = (u8) val;
4742
4743 if (is_valid_ether_addr(mac)) {
4744 memcpy(dev->mac_addr, mac, 6);
4745 return 0;
4746 } else {
4747 return -EINVAL;
4748 }
4749}
4750
4751static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4752{
4753 struct cnic_local *cp = dev->cnic_priv;
4754 u32 base, base2, addr, addr1, val;
4755 int port = CNIC_PORT(cp);
4756
4757 dev->max_iscsi_conn = 0;
4758 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
4759 if (base == 0)
4760 return;
4761
4762 base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
4763 MISC_REG_GENERIC_CR_0));
4764 addr = BNX2X_SHMEM_ADDR(base,
4765 dev_info.port_hw_config[port].iscsi_mac_upper);
4766
4767 addr1 = BNX2X_SHMEM_ADDR(base,
4768 dev_info.port_hw_config[port].iscsi_mac_lower);
4769
4770 cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
4771
4772 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
4773 val = CNIC_RD(dev, addr);
4774
4775 if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
4776 u16 val16;
4777
4778 addr = BNX2X_SHMEM_ADDR(base,
4779 drv_lic_key[port].max_iscsi_init_conn);
4780 val16 = CNIC_RD16(dev, addr);
4781
4782 if (val16)
4783 val16 ^= 0x1e1e;
4784 dev->max_iscsi_conn = val16;
4785 }
4786
4787 if (BNX2X_CHIP_IS_E2(cp->chip_id))
4788 dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
4789
4790 if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
4791 int func = CNIC_FUNC(cp);
4792 u32 mf_cfg_addr;
4793
4794 if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
4795 mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
4796 mf_cfg_addr));
4797 else
4798 mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
4799
4800 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4801 /* Must determine if the MF is SD vs SI mode */
4802 addr = BNX2X_SHMEM_ADDR(base,
4803 dev_info.shared_feature_config.config);
4804 val = CNIC_RD(dev, addr);
4805 if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
4806 SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
4807 int rc;
4808
4809 /* MULTI_FUNCTION_SI mode */
4810 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4811 func_ext_config[func].func_cfg);
4812 val = CNIC_RD(dev, addr);
4813 if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
4814 dev->max_iscsi_conn = 0;
4815
4816 if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
4817 dev->max_fcoe_conn = 0;
4818
4819 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4820 func_ext_config[func].
4821 iscsi_mac_addr_upper);
4822 addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4823 func_ext_config[func].
4824 iscsi_mac_addr_lower);
4825 rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
4826 addr1);
4827 if (rc && func > 1)
4828 dev->max_iscsi_conn = 0;
4829
4830 return;
4831 }
4832 }
4833
4834 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4835 func_mf_config[func].e1hov_tag);
4836
4837 val = CNIC_RD(dev, addr);
4838 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4839 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4840 dev->max_fcoe_conn = 0;
4841 dev->max_iscsi_conn = 0;
4842 }
4843 }
4844 if (!is_valid_ether_addr(dev->mac_addr))
4845 dev->max_iscsi_conn = 0;
4846}
4847
4848static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 4741static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4849{ 4742{
4850 struct cnic_local *cp = dev->cnic_priv; 4743 struct cnic_local *cp = dev->cnic_priv;
@@ -4926,8 +4819,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4926 4819
4927 cnic_init_bnx2x_kcq(dev); 4820 cnic_init_bnx2x_kcq(dev);
4928 4821
4929 cnic_get_bnx2x_iscsi_info(dev);
4930
4931 /* Only 1 EQ */ 4822 /* Only 1 EQ */
4932 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 4823 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4933 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4824 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5281,15 +5172,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5281 5172
5282 dev_hold(dev); 5173 dev_hold(dev);
5283 pci_dev_get(pdev); 5174 pci_dev_get(pdev);
5284 if (pdev->device == PCI_DEVICE_ID_NX2_5709 || 5175 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5285 pdev->device == PCI_DEVICE_ID_NX2_5709S) { 5176 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5286 u8 rev; 5177 (pdev->revision < 0x10)) {
5287 5178 pci_dev_put(pdev);
5288 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 5179 goto cnic_err;
5289 if (rev < 0x10) {
5290 pci_dev_put(pdev);
5291 goto cnic_err;
5292 }
5293 } 5180 }
5294 pci_dev_put(pdev); 5181 pci_dev_put(pdev);
5295 5182
@@ -5360,6 +5247,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5360 cdev->pcidev = pdev; 5247 cdev->pcidev = pdev;
5361 cp->chip_id = ethdev->chip_id; 5248 cp->chip_id = ethdev->chip_id;
5362 5249
5250 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5251 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5252 if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
5253 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5254 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5255
5256 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5257
5363 cp->cnic_ops = &cnic_bnx2x_ops; 5258 cp->cnic_ops = &cnic_bnx2x_ops;
5364 cp->start_hw = cnic_start_bnx2x_hw; 5259 cp->start_hw = cnic_start_bnx2x_hw;
5365 cp->stop_hw = cnic_stop_bnx2x_hw; 5260 cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index b328f6c924c3..4456260c653c 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -220,7 +220,7 @@ struct cnic_local {
220#define ULP_F_INIT 0 220#define ULP_F_INIT 0
221#define ULP_F_START 1 221#define ULP_F_START 1
222#define ULP_F_CALL_PENDING 2 222#define ULP_F_CALL_PENDING 2
223 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; 223 struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
224 224
225 unsigned long cnic_local_flags; 225 unsigned long cnic_local_flags;
226#define CNIC_LCL_FL_KWQ_INIT 0x0 226#define CNIC_LCL_FL_KWQ_INIT 0x0
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 9f44e0ffe003..e01b49ee3591 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.2.12" 15#define CNIC_MODULE_VERSION "2.2.13"
16#define CNIC_MODULE_RELDATE "Jan 03, 2011" 16#define CNIC_MODULE_RELDATE "Jan 31, 2011"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
159 u32 drv_state; 159 u32 drv_state;
160#define CNIC_DRV_STATE_REGD 0x00000001 160#define CNIC_DRV_STATE_REGD 0x00000001
161#define CNIC_DRV_STATE_USING_MSIX 0x00000002 161#define CNIC_DRV_STATE_USING_MSIX 0x00000002
162#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
163#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
164#define CNIC_DRV_STATE_NO_FCOE 0x00000010
162 u32 chip_id; 165 u32 chip_id;
163 u32 max_kwqe_pending; 166 u32 max_kwqe_pending;
164 struct pci_dev *pdev; 167 struct pci_dev *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
176 u32 fcoe_init_cid; 179 u32 fcoe_init_cid;
177 u16 iscsi_l2_client_id; 180 u16 iscsi_l2_client_id;
178 u16 iscsi_l2_cid; 181 u16 iscsi_l2_cid;
182 u8 iscsi_mac[ETH_ALEN];
179 183
180 int num_irq; 184 int num_irq;
181 struct cnic_irq irq_arr[MAX_CNIC_VEC]; 185 struct cnic_irq irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index d325e01a53e0..537a4b2e2020 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -95,6 +95,9 @@
95 Dmitry Pervushin : dpervushin@ru.mvista.com 95 Dmitry Pervushin : dpervushin@ru.mvista.com
96 : PNX010X platform support 96 : PNX010X platform support
97 97
98 Domenico Andreoli : cavokz@gmail.com
99 : QQ2440 platform support
100
98*/ 101*/
99 102
100/* Always include 'config.h' first in case the user wants to turn on 103/* Always include 'config.h' first in case the user wants to turn on
@@ -176,6 +179,10 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
176#elif defined(CONFIG_ARCH_IXDP2X01) 179#elif defined(CONFIG_ARCH_IXDP2X01)
177static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; 180static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
178static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; 181static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
182#elif defined(CONFIG_MACH_QQ2440)
183#include <mach/qq2440.h>
184static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 };
185static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 };
179#elif defined(CONFIG_MACH_MX31ADS) 186#elif defined(CONFIG_MACH_MX31ADS)
180#include <mach/board-mx31ads.h> 187#include <mach/board-mx31ads.h>
181static unsigned int netcard_portlist[] __used __initdata = { 188static unsigned int netcard_portlist[] __used __initdata = {
@@ -521,6 +528,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
521#endif 528#endif
522 lp->force = g_cs89x0_media__force; 529 lp->force = g_cs89x0_media__force;
523#endif 530#endif
531
532#if defined(CONFIG_MACH_QQ2440)
533 lp->force |= FORCE_RJ45 | FORCE_FULL;
534#endif
524 } 535 }
525 536
526 /* Grab the region so we can find another board if autoIRQ fails. */ 537 /* Grab the region so we can find another board if autoIRQ fails. */
@@ -943,10 +954,10 @@ skip_this_frame:
943static void __init reset_chip(struct net_device *dev) 954static void __init reset_chip(struct net_device *dev)
944{ 955{
945#if !defined(CONFIG_MACH_MX31ADS) 956#if !defined(CONFIG_MACH_MX31ADS)
946#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01) 957#if !defined(CS89x0_NONISA_IRQ)
947 struct net_local *lp = netdev_priv(dev); 958 struct net_local *lp = netdev_priv(dev);
948 int ioaddr = dev->base_addr; 959 int ioaddr = dev->base_addr;
949#endif 960#endif /* CS89x0_NONISA_IRQ */
950 int reset_start_time; 961 int reset_start_time;
951 962
952 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); 963 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -954,7 +965,7 @@ static void __init reset_chip(struct net_device *dev)
954 /* wait 30 ms */ 965 /* wait 30 ms */
955 msleep(30); 966 msleep(30);
956 967
957#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01) 968#if !defined(CS89x0_NONISA_IRQ)
958 if (lp->chip_type != CS8900) { 969 if (lp->chip_type != CS8900) {
959 /* Hardware problem requires PNP registers to be reconfigured after a reset */ 970 /* Hardware problem requires PNP registers to be reconfigured after a reset */
960 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT); 971 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
@@ -965,7 +976,7 @@ static void __init reset_chip(struct net_device *dev)
965 outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT); 976 outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
966 outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1); 977 outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
967 } 978 }
968#endif /* IXDP2x01 */ 979#endif /* CS89x0_NONISA_IRQ */
969 980
970 /* Wait until the chip is reset */ 981 /* Wait until the chip is reset */
971 reset_start_time = jiffies; 982 reset_start_time = jiffies;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ef02aa68c926..862804f32b6e 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
186 dev = NULL; 186 dev = NULL;
187 if (grp) 187 if (grp)
188 dev = vlan_group_get_device(grp, vlan); 188 dev = vlan_group_get_device(grp, vlan);
189 } else 189 } else if (netif_is_bond_slave(dev)) {
190 while (dev->master) 190 while (dev->master)
191 dev = dev->master; 191 dev = dev->master;
192 }
192 return dev; 193 return dev;
193 } 194 }
194 } 195 }
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
967 cxgb_neigh_update((struct neighbour *)ctx); 968 cxgb_neigh_update((struct neighbour *)ctx);
968 break; 969 break;
969 } 970 }
970 case (NETEVENT_PMTU_UPDATE):
971 break;
972 case (NETEVENT_REDIRECT):{ 971 case (NETEVENT_REDIRECT):{
973 struct netevent_redirect *nr = ctx; 972 struct netevent_redirect *nr = ctx;
974 cxgb_redirect(nr->old, nr->new); 973 cxgb_redirect(nr->old, nr->new);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index ec35d458102c..5352c8a23f4d 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
2471 case NETEVENT_NEIGH_UPDATE: 2471 case NETEVENT_NEIGH_UPDATE:
2472 check_neigh_update(data); 2472 check_neigh_update(data);
2473 break; 2473 break;
2474 case NETEVENT_PMTU_UPDATE:
2475 case NETEVENT_REDIRECT: 2474 case NETEVENT_REDIRECT:
2476 default: 2475 default:
2477 break; 2476 break;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 7018bfe408a4..082d6ea69920 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1730,7 +1730,7 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
1730 emac_read(EMAC_TXCARRIERSENSE); 1730 emac_read(EMAC_TXCARRIERSENSE);
1731 emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); 1731 emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
1732 1732
1733 ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); 1733 ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
1734 emac_write(EMAC_TXUNDERRUN, stats_clear_mask); 1734 emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
1735 1735
1736 return &ndev->stats; 1736 return &ndev->stats;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 461dd6f905f7..317708113601 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1593,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
1593 ndev->dev_addr[i] = ior(db, i+DM9000_PAR); 1593 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1594 } 1594 }
1595 1595
1596 if (!is_valid_ether_addr(ndev->dev_addr)) 1596 if (!is_valid_ether_addr(ndev->dev_addr)) {
1597 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " 1597 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1598 "set using ifconfig\n", ndev->name); 1598 "set using ifconfig\n", ndev->name);
1599 1599
1600 random_ether_addr(ndev->dev_addr);
1601 mac_src = "random";
1602 }
1603
1604
1600 platform_set_drvdata(pdev, ndev); 1605 platform_set_drvdata(pdev, ndev);
1601 ret = register_netdev(ndev); 1606 ret = register_netdev(ndev);
1602 1607
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 13149983d07e..c516a7440bec 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -86,6 +86,7 @@
86#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 86#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
87#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 87#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
88#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 88#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
89#define E1000_CTRL_EXT_LSECCK 0x00001000
89#define E1000_CTRL_EXT_PHYPDEN 0x00100000 90#define E1000_CTRL_EXT_PHYPDEN 0x00100000
90 91
91/* Receive Descriptor bit definitions */ 92/* Receive Descriptor bit definitions */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index e610e1369053..00bf595ebd67 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -364,6 +364,7 @@ struct e1000_adapter {
364 /* structs defined in e1000_hw.h */ 364 /* structs defined in e1000_hw.h */
365 struct e1000_hw hw; 365 struct e1000_hw hw;
366 366
367 spinlock_t stats64_lock;
367 struct e1000_hw_stats stats; 368 struct e1000_hw_stats stats;
368 struct e1000_phy_info phy_info; 369 struct e1000_phy_info phy_info;
369 struct e1000_phy_stats phy_stats; 370 struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
494extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); 495extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
495extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 496extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
496extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 497extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
497extern void e1000e_update_stats(struct e1000_adapter *adapter); 498extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
499 struct rtnl_link_stats64
500 *stats);
498extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 501extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
499extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 502extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
500extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 503extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index fa08b6336cfb..07f09e96e453 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,15 +46,15 @@ struct e1000_stats {
46}; 46};
47 47
48#define E1000_STAT(str, m) { \ 48#define E1000_STAT(str, m) { \
49 .stat_string = str, \ 49 .stat_string = str, \
50 .type = E1000_STATS, \ 50 .type = E1000_STATS, \
51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ 51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
52 .stat_offset = offsetof(struct e1000_adapter, m) } 52 .stat_offset = offsetof(struct e1000_adapter, m) }
53#define E1000_NETDEV_STAT(str, m) { \ 53#define E1000_NETDEV_STAT(str, m) { \
54 .stat_string = str, \ 54 .stat_string = str, \
55 .type = NETDEV_STATS, \ 55 .type = NETDEV_STATS, \
56 .sizeof_stat = sizeof(((struct net_device *)0)->m), \ 56 .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
57 .stat_offset = offsetof(struct net_device, m) } 57 .stat_offset = offsetof(struct rtnl_link_stats64, m) }
58 58
59static const struct e1000_stats e1000_gstrings_stats[] = { 59static const struct e1000_stats e1000_gstrings_stats[] = {
60 E1000_STAT("rx_packets", stats.gprc), 60 E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
65 E1000_STAT("tx_broadcast", stats.bptc), 65 E1000_STAT("tx_broadcast", stats.bptc),
66 E1000_STAT("rx_multicast", stats.mprc), 66 E1000_STAT("rx_multicast", stats.mprc),
67 E1000_STAT("tx_multicast", stats.mptc), 67 E1000_STAT("tx_multicast", stats.mptc),
68 E1000_NETDEV_STAT("rx_errors", stats.rx_errors), 68 E1000_NETDEV_STAT("rx_errors", rx_errors),
69 E1000_NETDEV_STAT("tx_errors", stats.tx_errors), 69 E1000_NETDEV_STAT("tx_errors", tx_errors),
70 E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped), 70 E1000_NETDEV_STAT("tx_dropped", tx_dropped),
71 E1000_STAT("multicast", stats.mprc), 71 E1000_STAT("multicast", stats.mprc),
72 E1000_STAT("collisions", stats.colc), 72 E1000_STAT("collisions", stats.colc),
73 E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors), 73 E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
74 E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors), 74 E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
75 E1000_STAT("rx_crc_errors", stats.crcerrs), 75 E1000_STAT("rx_crc_errors", stats.crcerrs),
76 E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors), 76 E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
77 E1000_STAT("rx_no_buffer_count", stats.rnbc), 77 E1000_STAT("rx_no_buffer_count", stats.rnbc),
78 E1000_STAT("rx_missed_errors", stats.mpc), 78 E1000_STAT("rx_missed_errors", stats.mpc),
79 E1000_STAT("tx_aborted_errors", stats.ecol), 79 E1000_STAT("tx_aborted_errors", stats.ecol),
80 E1000_STAT("tx_carrier_errors", stats.tncrs), 80 E1000_STAT("tx_carrier_errors", stats.tncrs),
81 E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors), 81 E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
82 E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors), 82 E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
83 E1000_STAT("tx_window_errors", stats.latecol), 83 E1000_STAT("tx_window_errors", stats.latecol),
84 E1000_STAT("tx_abort_late_coll", stats.latecol), 84 E1000_STAT("tx_abort_late_coll", stats.latecol),
85 E1000_STAT("tx_deferred_ok", stats.dc), 85 E1000_STAT("tx_deferred_ok", stats.dc),
@@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
433 struct e1000_hw *hw = &adapter->hw; 433 struct e1000_hw *hw = &adapter->hw;
434 u32 *regs_buff = p; 434 u32 *regs_buff = p;
435 u16 phy_data; 435 u16 phy_data;
436 u8 revision_id;
437 436
438 memset(p, 0, E1000_REGS_LEN * sizeof(u32)); 437 memset(p, 0, E1000_REGS_LEN * sizeof(u32));
439 438
440 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); 439 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
441 440 adapter->pdev->device;
442 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
443 441
444 regs_buff[0] = er32(CTRL); 442 regs_buff[0] = er32(CTRL);
445 regs_buff[1] = er32(STATUS); 443 regs_buff[1] = er32(STATUS);
@@ -684,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
684 rx_old = adapter->rx_ring; 682 rx_old = adapter->rx_ring;
685 683
686 err = -ENOMEM; 684 err = -ENOMEM;
687 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 685 tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
688 if (!tx_ring) 686 if (!tx_ring)
689 goto err_alloc_tx; 687 goto err_alloc_tx;
690 /*
691 * use a memcpy to save any previously configured
692 * items like napi structs from having to be
693 * reinitialized
694 */
695 memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
696 688
697 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 689 rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
698 if (!rx_ring) 690 if (!rx_ring)
699 goto err_alloc_rx; 691 goto err_alloc_rx;
700 memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
701 692
702 adapter->tx_ring = tx_ring; 693 adapter->tx_ring = tx_ring;
703 adapter->rx_ring = rx_ring; 694 adapter->rx_ring = rx_ring;
@@ -1255,7 +1246,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1255{ 1246{
1256 struct e1000_hw *hw = &adapter->hw; 1247 struct e1000_hw *hw = &adapter->hw;
1257 u32 ctrl_reg = 0; 1248 u32 ctrl_reg = 0;
1258 u32 stat_reg = 0;
1259 u16 phy_reg = 0; 1249 u16 phy_reg = 0;
1260 s32 ret_val = 0; 1250 s32 ret_val = 0;
1261 1251
@@ -1363,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1363 * Set the ILOS bit on the fiber Nic if half duplex link is 1353 * Set the ILOS bit on the fiber Nic if half duplex link is
1364 * detected. 1354 * detected.
1365 */ 1355 */
1366 stat_reg = er32(STATUS); 1356 if ((er32(STATUS) & E1000_STATUS_FD) == 0)
1367 if ((stat_reg & E1000_STATUS_FD) == 0)
1368 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1357 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1369 } 1358 }
1370 1359
@@ -1677,10 +1666,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1677 } else { 1666 } else {
1678 hw->mac.ops.check_for_link(hw); 1667 hw->mac.ops.check_for_link(hw);
1679 if (hw->mac.autoneg) 1668 if (hw->mac.autoneg)
1680 msleep(4000); 1669 /*
1670 * On some Phy/switch combinations, link establishment
1671 * can take a few seconds more than expected.
1672 */
1673 msleep(5000);
1681 1674
1682 if (!(er32(STATUS) & 1675 if (!(er32(STATUS) & E1000_STATUS_LU))
1683 E1000_STATUS_LU))
1684 *data = 1; 1676 *data = 1;
1685 } 1677 }
1686 return *data; 1678 return *data;
@@ -1807,8 +1799,7 @@ static void e1000_get_wol(struct net_device *netdev,
1807 return; 1799 return;
1808 1800
1809 wol->supported = WAKE_UCAST | WAKE_MCAST | 1801 wol->supported = WAKE_UCAST | WAKE_MCAST |
1810 WAKE_BCAST | WAKE_MAGIC | 1802 WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
1811 WAKE_PHY | WAKE_ARP;
1812 1803
1813 /* apply any specific unsupported masks here */ 1804 /* apply any specific unsupported masks here */
1814 if (adapter->flags & FLAG_NO_WAKE_UCAST) { 1805 if (adapter->flags & FLAG_NO_WAKE_UCAST) {
@@ -1829,19 +1820,16 @@ static void e1000_get_wol(struct net_device *netdev,
1829 wol->wolopts |= WAKE_MAGIC; 1820 wol->wolopts |= WAKE_MAGIC;
1830 if (adapter->wol & E1000_WUFC_LNKC) 1821 if (adapter->wol & E1000_WUFC_LNKC)
1831 wol->wolopts |= WAKE_PHY; 1822 wol->wolopts |= WAKE_PHY;
1832 if (adapter->wol & E1000_WUFC_ARP)
1833 wol->wolopts |= WAKE_ARP;
1834} 1823}
1835 1824
1836static int e1000_set_wol(struct net_device *netdev, 1825static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1837 struct ethtool_wolinfo *wol)
1838{ 1826{
1839 struct e1000_adapter *adapter = netdev_priv(netdev); 1827 struct e1000_adapter *adapter = netdev_priv(netdev);
1840 1828
1841 if (!(adapter->flags & FLAG_HAS_WOL) || 1829 if (!(adapter->flags & FLAG_HAS_WOL) ||
1842 !device_can_wakeup(&adapter->pdev->dev) || 1830 !device_can_wakeup(&adapter->pdev->dev) ||
1843 (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | 1831 (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1844 WAKE_MAGIC | WAKE_PHY | WAKE_ARP))) 1832 WAKE_MAGIC | WAKE_PHY)))
1845 return -EOPNOTSUPP; 1833 return -EOPNOTSUPP;
1846 1834
1847 /* these settings will always override what we currently have */ 1835 /* these settings will always override what we currently have */
@@ -1857,8 +1845,6 @@ static int e1000_set_wol(struct net_device *netdev,
1857 adapter->wol |= E1000_WUFC_MAG; 1845 adapter->wol |= E1000_WUFC_MAG;
1858 if (wol->wolopts & WAKE_PHY) 1846 if (wol->wolopts & WAKE_PHY)
1859 adapter->wol |= E1000_WUFC_LNKC; 1847 adapter->wol |= E1000_WUFC_LNKC;
1860 if (wol->wolopts & WAKE_ARP)
1861 adapter->wol |= E1000_WUFC_ARP;
1862 1848
1863 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1849 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1864 1850
@@ -1972,8 +1958,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
1972static int e1000_nway_reset(struct net_device *netdev) 1958static int e1000_nway_reset(struct net_device *netdev)
1973{ 1959{
1974 struct e1000_adapter *adapter = netdev_priv(netdev); 1960 struct e1000_adapter *adapter = netdev_priv(netdev);
1975 if (netif_running(netdev)) 1961
1976 e1000e_reinit_locked(adapter); 1962 if (!netif_running(netdev))
1963 return -EAGAIN;
1964
1965 if (!adapter->hw.mac.autoneg)
1966 return -EINVAL;
1967
1968 e1000e_reinit_locked(adapter);
1969
1977 return 0; 1970 return 0;
1978} 1971}
1979 1972
@@ -1982,14 +1975,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1982 u64 *data) 1975 u64 *data)
1983{ 1976{
1984 struct e1000_adapter *adapter = netdev_priv(netdev); 1977 struct e1000_adapter *adapter = netdev_priv(netdev);
1978 struct rtnl_link_stats64 net_stats;
1985 int i; 1979 int i;
1986 char *p = NULL; 1980 char *p = NULL;
1987 1981
1988 e1000e_update_stats(adapter); 1982 e1000e_get_stats64(netdev, &net_stats);
1989 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1983 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1990 switch (e1000_gstrings_stats[i].type) { 1984 switch (e1000_gstrings_stats[i].type) {
1991 case NETDEV_STATS: 1985 case NETDEV_STATS:
1992 p = (char *) netdev + 1986 p = (char *) &net_stats +
1993 e1000_gstrings_stats[i].stat_offset; 1987 e1000_gstrings_stats[i].stat_offset;
1994 break; 1988 break;
1995 case E1000_STATS: 1989 case E1000_STATS:
@@ -2014,7 +2008,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
2014 2008
2015 switch (stringset) { 2009 switch (stringset) {
2016 case ETH_SS_TEST: 2010 case ETH_SS_TEST:
2017 memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test)); 2011 memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
2018 break; 2012 break;
2019 case ETH_SS_STATS: 2013 case ETH_SS_STATS:
2020 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 2014 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index bc0860a598c9..307e1ec22417 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -812,9 +812,8 @@ struct e1000_nvm_operations {
812 812
813struct e1000_mac_info { 813struct e1000_mac_info {
814 struct e1000_mac_operations ops; 814 struct e1000_mac_operations ops;
815 815 u8 addr[ETH_ALEN];
816 u8 addr[6]; 816 u8 perm_addr[ETH_ALEN];
817 u8 perm_addr[6];
818 817
819 enum e1000_mac_type type; 818 enum e1000_mac_type type;
820 819
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index fb46974cfec1..ce1dbfdca112 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -140,6 +140,11 @@
140#define I82579_LPI_CTRL PHY_REG(772, 20) 140#define I82579_LPI_CTRL PHY_REG(772, 20)
141#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 141#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
142 142
143/* EMI Registers */
144#define I82579_EMI_ADDR 0x10
145#define I82579_EMI_DATA 0x11
146#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
147
143/* Strapping Option Register - RO */ 148/* Strapping Option Register - RO */
144#define E1000_STRAP 0x0000C 149#define E1000_STRAP 0x0000C
145#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 150#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
@@ -302,9 +307,9 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
302 * the interconnect to PCIe mode. 307 * the interconnect to PCIe mode.
303 */ 308 */
304 fwsm = er32(FWSM); 309 fwsm = er32(FWSM);
305 if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) { 310 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
306 ctrl = er32(CTRL); 311 ctrl = er32(CTRL);
307 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 312 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
308 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 313 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
309 ew32(CTRL, ctrl); 314 ew32(CTRL, ctrl);
310 udelay(10); 315 udelay(10);
@@ -331,7 +336,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
331 goto out; 336 goto out;
332 337
333 /* Ungate automatic PHY configuration on non-managed 82579 */ 338 /* Ungate automatic PHY configuration on non-managed 82579 */
334 if ((hw->mac.type == e1000_pch2lan) && 339 if ((hw->mac.type == e1000_pch2lan) &&
335 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 340 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
336 msleep(10); 341 msleep(10);
337 e1000_gate_hw_phy_config_ich8lan(hw, false); 342 e1000_gate_hw_phy_config_ich8lan(hw, false);
@@ -366,7 +371,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
366 case e1000_phy_82579: 371 case e1000_phy_82579:
367 phy->ops.check_polarity = e1000_check_polarity_82577; 372 phy->ops.check_polarity = e1000_check_polarity_82577;
368 phy->ops.force_speed_duplex = 373 phy->ops.force_speed_duplex =
369 e1000_phy_force_speed_duplex_82577; 374 e1000_phy_force_speed_duplex_82577;
370 phy->ops.get_cable_length = e1000_get_cable_length_82577; 375 phy->ops.get_cable_length = e1000_get_cable_length_82577;
371 phy->ops.get_info = e1000_get_phy_info_82577; 376 phy->ops.get_info = e1000_get_phy_info_82577;
372 phy->ops.commit = e1000e_phy_sw_reset; 377 phy->ops.commit = e1000e_phy_sw_reset;
@@ -753,7 +758,13 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
753 if (rc) 758 if (rc)
754 return rc; 759 return rc;
755 760
756 if (adapter->hw.phy.type == e1000_phy_ife) { 761 /*
762 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
763 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
764 */
765 if ((adapter->hw.phy.type == e1000_phy_ife) ||
766 ((adapter->hw.mac.type >= e1000_pch2lan) &&
767 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
757 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; 768 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
758 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; 769 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
759 } 770 }
@@ -1723,11 +1734,25 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1723 /* Configure the LCD with the OEM bits in NVM */ 1734 /* Configure the LCD with the OEM bits in NVM */
1724 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1735 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1725 1736
1726 /* Ungate automatic PHY configuration on non-managed 82579 */ 1737 if (hw->mac.type == e1000_pch2lan) {
1727 if ((hw->mac.type == e1000_pch2lan) && 1738 /* Ungate automatic PHY configuration on non-managed 82579 */
1728 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 1739 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1729 msleep(10); 1740 msleep(10);
1730 e1000_gate_hw_phy_config_ich8lan(hw, false); 1741 e1000_gate_hw_phy_config_ich8lan(hw, false);
1742 }
1743
1744 /* Set EEE LPI Update Timer to 200usec */
1745 ret_val = hw->phy.ops.acquire(hw);
1746 if (ret_val)
1747 goto out;
1748 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1749 I82579_LPI_UPDATE_TIMER);
1750 if (ret_val)
1751 goto release;
1752 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1753 0x1387);
1754release:
1755 hw->phy.ops.release(hw);
1731 } 1756 }
1732 1757
1733out: 1758out:
@@ -2104,7 +2129,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2104{ 2129{
2105 union ich8_hws_flash_status hsfsts; 2130 union ich8_hws_flash_status hsfsts;
2106 s32 ret_val = -E1000_ERR_NVM; 2131 s32 ret_val = -E1000_ERR_NVM;
2107 s32 i = 0;
2108 2132
2109 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2133 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2110 2134
@@ -2140,6 +2164,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2140 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 2164 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2141 ret_val = 0; 2165 ret_val = 0;
2142 } else { 2166 } else {
2167 s32 i = 0;
2168
2143 /* 2169 /*
2144 * Otherwise poll for sometime so the current 2170 * Otherwise poll for sometime so the current
2145 * cycle has a chance to end before giving up. 2171 * cycle has a chance to end before giving up.
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 68aa1749bf66..96921de5df2e 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1978{ 1978{
1979 struct e1000_nvm_info *nvm = &hw->nvm; 1979 struct e1000_nvm_info *nvm = &hw->nvm;
1980 u32 eecd = er32(EECD); 1980 u32 eecd = er32(EECD);
1981 u16 timeout = 0;
1982 u8 spi_stat_reg; 1981 u8 spi_stat_reg;
1983 1982
1984 if (nvm->type == e1000_nvm_eeprom_spi) { 1983 if (nvm->type == e1000_nvm_eeprom_spi) {
1984 u16 timeout = NVM_MAX_RETRY_SPI;
1985
1985 /* Clear SK and CS */ 1986 /* Clear SK and CS */
1986 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 1987 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1987 ew32(EECD, eecd); 1988 ew32(EECD, eecd);
1988 udelay(1); 1989 udelay(1);
1989 timeout = NVM_MAX_RETRY_SPI;
1990 1990
1991 /* 1991 /*
1992 * Read "Status Register" repeatedly until the LSB is cleared. 1992 * Read "Status Register" repeatedly until the LSB is cleared.
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 6d513a383340..a39d4a4d871c 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -54,7 +54,7 @@
54 54
55#define DRV_EXTRAVERSION "-k2" 55#define DRV_EXTRAVERSION "-k2"
56 56
57#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION 57#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
58char e1000e_driver_name[] = "e1000e"; 58char e1000e_driver_name[] = "e1000e";
59const char e1000e_driver_version[] = DRV_VERSION; 59const char e1000e_driver_version[] = DRV_VERSION;
60 60
@@ -900,8 +900,6 @@ next_desc:
900 900
901 adapter->total_rx_bytes += total_rx_bytes; 901 adapter->total_rx_bytes += total_rx_bytes;
902 adapter->total_rx_packets += total_rx_packets; 902 adapter->total_rx_packets += total_rx_packets;
903 netdev->stats.rx_bytes += total_rx_bytes;
904 netdev->stats.rx_packets += total_rx_packets;
905 return cleaned; 903 return cleaned;
906} 904}
907 905
@@ -1060,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1060 } 1058 }
1061 adapter->total_tx_bytes += total_tx_bytes; 1059 adapter->total_tx_bytes += total_tx_bytes;
1062 adapter->total_tx_packets += total_tx_packets; 1060 adapter->total_tx_packets += total_tx_packets;
1063 netdev->stats.tx_bytes += total_tx_bytes;
1064 netdev->stats.tx_packets += total_tx_packets;
1065 return count < tx_ring->count; 1061 return count < tx_ring->count;
1066} 1062}
1067 1063
@@ -1248,8 +1244,6 @@ next_desc:
1248 1244
1249 adapter->total_rx_bytes += total_rx_bytes; 1245 adapter->total_rx_bytes += total_rx_bytes;
1250 adapter->total_rx_packets += total_rx_packets; 1246 adapter->total_rx_packets += total_rx_packets;
1251 netdev->stats.rx_bytes += total_rx_bytes;
1252 netdev->stats.rx_packets += total_rx_packets;
1253 return cleaned; 1247 return cleaned;
1254} 1248}
1255 1249
@@ -1328,7 +1322,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1328 /* an error means any chain goes out the window 1322 /* an error means any chain goes out the window
1329 * too */ 1323 * too */
1330 if (rx_ring->rx_skb_top) 1324 if (rx_ring->rx_skb_top)
1331 dev_kfree_skb(rx_ring->rx_skb_top); 1325 dev_kfree_skb_irq(rx_ring->rx_skb_top);
1332 rx_ring->rx_skb_top = NULL; 1326 rx_ring->rx_skb_top = NULL;
1333 goto next_desc; 1327 goto next_desc;
1334 } 1328 }
@@ -1401,7 +1395,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1401 /* eth type trans needs skb->data to point to something */ 1395 /* eth type trans needs skb->data to point to something */
1402 if (!pskb_may_pull(skb, ETH_HLEN)) { 1396 if (!pskb_may_pull(skb, ETH_HLEN)) {
1403 e_err("pskb_may_pull failed.\n"); 1397 e_err("pskb_may_pull failed.\n");
1404 dev_kfree_skb(skb); 1398 dev_kfree_skb_irq(skb);
1405 goto next_desc; 1399 goto next_desc;
1406 } 1400 }
1407 1401
@@ -1429,8 +1423,6 @@ next_desc:
1429 1423
1430 adapter->total_rx_bytes += total_rx_bytes; 1424 adapter->total_rx_bytes += total_rx_bytes;
1431 adapter->total_rx_packets += total_rx_packets; 1425 adapter->total_rx_packets += total_rx_packets;
1432 netdev->stats.rx_bytes += total_rx_bytes;
1433 netdev->stats.rx_packets += total_rx_packets;
1434 return cleaned; 1426 return cleaned;
1435} 1427}
1436 1428
@@ -1857,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1857 int err = 0, vector = 0; 1849 int err = 0, vector = 0;
1858 1850
1859 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1851 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1860 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); 1852 snprintf(adapter->rx_ring->name,
1853 sizeof(adapter->rx_ring->name) - 1,
1854 "%s-rx-0", netdev->name);
1861 else 1855 else
1862 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1856 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1863 err = request_irq(adapter->msix_entries[vector].vector, 1857 err = request_irq(adapter->msix_entries[vector].vector,
@@ -1870,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1870 vector++; 1864 vector++;
1871 1865
1872 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1866 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1873 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); 1867 snprintf(adapter->tx_ring->name,
1868 sizeof(adapter->tx_ring->name) - 1,
1869 "%s-tx-0", netdev->name);
1874 else 1870 else
1875 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1871 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1876 err = request_irq(adapter->msix_entries[vector].vector, 1872 err = request_irq(adapter->msix_entries[vector].vector,
@@ -2734,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2734{ 2730{
2735 struct e1000_hw *hw = &adapter->hw; 2731 struct e1000_hw *hw = &adapter->hw;
2736 u32 rctl, rfctl; 2732 u32 rctl, rfctl;
2737 u32 psrctl = 0;
2738 u32 pages = 0; 2733 u32 pages = 0;
2739 2734
2740 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2735 /* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2833,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2833 adapter->rx_ps_pages = 0; 2828 adapter->rx_ps_pages = 0;
2834 2829
2835 if (adapter->rx_ps_pages) { 2830 if (adapter->rx_ps_pages) {
2831 u32 psrctl = 0;
2832
2836 /* Configure extra packet-split registers */ 2833 /* Configure extra packet-split registers */
2837 rfctl = er32(RFCTL); 2834 rfctl = er32(RFCTL);
2838 rfctl |= E1000_RFCTL_EXTEN; 2835 rfctl |= E1000_RFCTL_EXTEN;
@@ -3034,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
3034 struct netdev_hw_addr *ha; 3031 struct netdev_hw_addr *ha;
3035 u8 *mta_list; 3032 u8 *mta_list;
3036 u32 rctl; 3033 u32 rctl;
3037 int i;
3038 3034
3039 /* Check for Promiscuous and All Multicast modes */ 3035 /* Check for Promiscuous and All Multicast modes */
3040 3036
@@ -3057,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
3057 ew32(RCTL, rctl); 3053 ew32(RCTL, rctl);
3058 3054
3059 if (!netdev_mc_empty(netdev)) { 3055 if (!netdev_mc_empty(netdev)) {
3056 int i = 0;
3057
3060 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); 3058 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
3061 if (!mta_list) 3059 if (!mta_list)
3062 return; 3060 return;
3063 3061
3064 /* prepare a packed array of only addresses. */ 3062 /* prepare a packed array of only addresses. */
3065 i = 0;
3066 netdev_for_each_mc_addr(ha, netdev) 3063 netdev_for_each_mc_addr(ha, netdev)
3067 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3064 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3068 3065
@@ -3359,6 +3356,8 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3359 e1e_flush(); 3356 e1e_flush();
3360} 3357}
3361 3358
3359static void e1000e_update_stats(struct e1000_adapter *adapter);
3360
3362void e1000e_down(struct e1000_adapter *adapter) 3361void e1000e_down(struct e1000_adapter *adapter)
3363{ 3362{
3364 struct net_device *netdev = adapter->netdev; 3363 struct net_device *netdev = adapter->netdev;
@@ -3393,6 +3392,11 @@ void e1000e_down(struct e1000_adapter *adapter)
3393 del_timer_sync(&adapter->phy_info_timer); 3392 del_timer_sync(&adapter->phy_info_timer);
3394 3393
3395 netif_carrier_off(netdev); 3394 netif_carrier_off(netdev);
3395
3396 spin_lock(&adapter->stats64_lock);
3397 e1000e_update_stats(adapter);
3398 spin_unlock(&adapter->stats64_lock);
3399
3396 adapter->link_speed = 0; 3400 adapter->link_speed = 0;
3397 adapter->link_duplex = 0; 3401 adapter->link_duplex = 0;
3398 3402
@@ -3437,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3437 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3441 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3438 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3442 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3439 3443
3444 spin_lock_init(&adapter->stats64_lock);
3445
3440 e1000e_set_interrupt_capability(adapter); 3446 e1000e_set_interrupt_capability(adapter);
3441 3447
3442 if (e1000_alloc_queues(adapter)) 3448 if (e1000_alloc_queues(adapter))
@@ -3918,7 +3924,7 @@ release:
3918 * e1000e_update_stats - Update the board statistics counters 3924 * e1000e_update_stats - Update the board statistics counters
3919 * @adapter: board private structure 3925 * @adapter: board private structure
3920 **/ 3926 **/
3921void e1000e_update_stats(struct e1000_adapter *adapter) 3927static void e1000e_update_stats(struct e1000_adapter *adapter)
3922{ 3928{
3923 struct net_device *netdev = adapter->netdev; 3929 struct net_device *netdev = adapter->netdev;
3924 struct e1000_hw *hw = &adapter->hw; 3930 struct e1000_hw *hw = &adapter->hw;
@@ -4030,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4030{ 4036{
4031 struct e1000_hw *hw = &adapter->hw; 4037 struct e1000_hw *hw = &adapter->hw;
4032 struct e1000_phy_regs *phy = &adapter->phy_regs; 4038 struct e1000_phy_regs *phy = &adapter->phy_regs;
4033 int ret_val;
4034 4039
4035 if ((er32(STATUS) & E1000_STATUS_LU) && 4040 if ((er32(STATUS) & E1000_STATUS_LU) &&
4036 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4041 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4042 int ret_val;
4043
4037 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); 4044 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4038 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); 4045 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4039 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); 4046 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4179,7 +4186,6 @@ static void e1000_watchdog_task(struct work_struct *work)
4179 struct e1000_ring *tx_ring = adapter->tx_ring; 4186 struct e1000_ring *tx_ring = adapter->tx_ring;
4180 struct e1000_hw *hw = &adapter->hw; 4187 struct e1000_hw *hw = &adapter->hw;
4181 u32 link, tctl; 4188 u32 link, tctl;
4182 int tx_pending = 0;
4183 4189
4184 if (test_bit(__E1000_DOWN, &adapter->state)) 4190 if (test_bit(__E1000_DOWN, &adapter->state))
4185 return; 4191 return;
@@ -4320,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
4320 } 4326 }
4321 4327
4322link_up: 4328link_up:
4329 spin_lock(&adapter->stats64_lock);
4323 e1000e_update_stats(adapter); 4330 e1000e_update_stats(adapter);
4331 spin_unlock(&adapter->stats64_lock);
4324 4332
4325 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4333 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4326 adapter->tpt_old = adapter->stats.tpt; 4334 adapter->tpt_old = adapter->stats.tpt;
@@ -4334,20 +4342,17 @@ link_up:
4334 4342
4335 e1000e_update_adaptive(&adapter->hw); 4343 e1000e_update_adaptive(&adapter->hw);
4336 4344
4337 if (!netif_carrier_ok(netdev)) { 4345 if (!netif_carrier_ok(netdev) &&
4338 tx_pending = (e1000_desc_unused(tx_ring) + 1 < 4346 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4339 tx_ring->count); 4347 /*
4340 if (tx_pending) { 4348 * We've lost link, so the controller stops DMA,
4341 /* 4349 * but we've got queued Tx work that's never going
4342 * We've lost link, so the controller stops DMA, 4350 * to get done, so reset controller to flush Tx.
4343 * but we've got queued Tx work that's never going 4351 * (Do the reset outside of interrupt context).
4344 * to get done, so reset controller to flush Tx. 4352 */
4345 * (Do the reset outside of interrupt context). 4353 schedule_work(&adapter->reset_task);
4346 */ 4354 /* return immediately since reset is imminent */
4347 schedule_work(&adapter->reset_task); 4355 return;
4348 /* return immediately since reset is imminent */
4349 return;
4350 }
4351 } 4356 }
4352 4357
4353 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4358 /* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4411,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
4411 u32 cmd_length = 0; 4416 u32 cmd_length = 0;
4412 u16 ipcse = 0, tucse, mss; 4417 u16 ipcse = 0, tucse, mss;
4413 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4418 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4414 int err;
4415 4419
4416 if (!skb_is_gso(skb)) 4420 if (!skb_is_gso(skb))
4417 return 0; 4421 return 0;
4418 4422
4419 if (skb_header_cloned(skb)) { 4423 if (skb_header_cloned(skb)) {
4420 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4424 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4425
4421 if (err) 4426 if (err)
4422 return err; 4427 return err;
4423 } 4428 }
@@ -4928,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
4928} 4933}
4929 4934
4930/** 4935/**
4931 * e1000_get_stats - Get System Network Statistics 4936 * e1000_get_stats64 - Get System Network Statistics
4932 * @netdev: network interface device structure 4937 * @netdev: network interface device structure
4938 * @stats: rtnl_link_stats64 pointer
4933 * 4939 *
4934 * Returns the address of the device statistics structure. 4940 * Returns the address of the device statistics structure.
4935 * The statistics are actually updated from the timer callback.
4936 **/ 4941 **/
4937static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 4942struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
4943 struct rtnl_link_stats64 *stats)
4938{ 4944{
4939 /* only return the current stats */ 4945 struct e1000_adapter *adapter = netdev_priv(netdev);
4940 return &netdev->stats; 4946
4947 memset(stats, 0, sizeof(struct rtnl_link_stats64));
4948 spin_lock(&adapter->stats64_lock);
4949 e1000e_update_stats(adapter);
4950 /* Fill out the OS statistics structure */
4951 stats->rx_bytes = adapter->stats.gorc;
4952 stats->rx_packets = adapter->stats.gprc;
4953 stats->tx_bytes = adapter->stats.gotc;
4954 stats->tx_packets = adapter->stats.gptc;
4955 stats->multicast = adapter->stats.mprc;
4956 stats->collisions = adapter->stats.colc;
4957
4958 /* Rx Errors */
4959
4960 /*
4961 * RLEC on some newer hardware can be incorrect so build
4962 * our own version based on RUC and ROC
4963 */
4964 stats->rx_errors = adapter->stats.rxerrc +
4965 adapter->stats.crcerrs + adapter->stats.algnerrc +
4966 adapter->stats.ruc + adapter->stats.roc +
4967 adapter->stats.cexterr;
4968 stats->rx_length_errors = adapter->stats.ruc +
4969 adapter->stats.roc;
4970 stats->rx_crc_errors = adapter->stats.crcerrs;
4971 stats->rx_frame_errors = adapter->stats.algnerrc;
4972 stats->rx_missed_errors = adapter->stats.mpc;
4973
4974 /* Tx Errors */
4975 stats->tx_errors = adapter->stats.ecol +
4976 adapter->stats.latecol;
4977 stats->tx_aborted_errors = adapter->stats.ecol;
4978 stats->tx_window_errors = adapter->stats.latecol;
4979 stats->tx_carrier_errors = adapter->stats.tncrs;
4980
4981 /* Tx Dropped needs to be maintained elsewhere */
4982
4983 spin_unlock(&adapter->stats64_lock);
4984 return stats;
4941} 4985}
4942 4986
4943/** 4987/**
@@ -5507,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
5507{ 5551{
5508 struct net_device *netdev = data; 5552 struct net_device *netdev = data;
5509 struct e1000_adapter *adapter = netdev_priv(netdev); 5553 struct e1000_adapter *adapter = netdev_priv(netdev);
5510 int vector, msix_irq;
5511 5554
5512 if (adapter->msix_entries) { 5555 if (adapter->msix_entries) {
5556 int vector, msix_irq;
5557
5513 vector = 0; 5558 vector = 0;
5514 msix_irq = adapter->msix_entries[vector].vector; 5559 msix_irq = adapter->msix_entries[vector].vector;
5515 disable_irq(msix_irq); 5560 disable_irq(msix_irq);
@@ -5706,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
5706 .ndo_open = e1000_open, 5751 .ndo_open = e1000_open,
5707 .ndo_stop = e1000_close, 5752 .ndo_stop = e1000_close,
5708 .ndo_start_xmit = e1000_xmit_frame, 5753 .ndo_start_xmit = e1000_xmit_frame,
5709 .ndo_get_stats = e1000_get_stats, 5754 .ndo_get_stats64 = e1000e_get_stats64,
5710 .ndo_set_multicast_list = e1000_set_multi, 5755 .ndo_set_multicast_list = e1000_set_multi,
5711 .ndo_set_mac_address = e1000_set_mac, 5756 .ndo_set_mac_address = e1000_set_mac,
5712 .ndo_change_mtu = e1000_change_mtu, 5757 .ndo_change_mtu = e1000_change_mtu,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6bea051b134b..6ae31fcfb629 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
2409s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) 2409s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2410{ 2410{
2411 s32 ret_val; 2411 s32 ret_val;
2412 u32 page_select = 0;
2413 u32 page = offset >> IGP_PAGE_SHIFT; 2412 u32 page = offset >> IGP_PAGE_SHIFT;
2414 u32 page_shift = 0;
2415 2413
2416 ret_val = hw->phy.ops.acquire(hw); 2414 ret_val = hw->phy.ops.acquire(hw);
2417 if (ret_val) 2415 if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2427 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2425 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2428 2426
2429 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2427 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2428 u32 page_shift, page_select;
2429
2430 /* 2430 /*
2431 * Page select is register 31 for phy address 1 and 22 for 2431 * Page select is register 31 for phy address 1 and 22 for
2432 * phy address 2 and 3. Page select is shifted only for 2432 * phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
2468s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) 2468s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2469{ 2469{
2470 s32 ret_val; 2470 s32 ret_val;
2471 u32 page_select = 0;
2472 u32 page = offset >> IGP_PAGE_SHIFT; 2471 u32 page = offset >> IGP_PAGE_SHIFT;
2473 u32 page_shift = 0;
2474 2472
2475 ret_val = hw->phy.ops.acquire(hw); 2473 ret_val = hw->phy.ops.acquire(hw);
2476 if (ret_val) 2474 if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2486 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2484 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2487 2485
2488 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2486 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2487 u32 page_shift, page_select;
2488
2489 /* 2489 /*
2490 * Page select is register 31 for phy address 1 and 22 for 2490 * Page select is register 31 for phy address 1 and 22 for
2491 * phy address 2 and 3. Page select is shifted only for 2491 * phy address 2 and 3. Page select is shifted only for
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index e7b6c31880ba..2e573be16c13 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o 4 enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
5 5
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a937f49d9db7..3a3c3c8a3a9b 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,13 +32,13 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "1.4.1.10" 35#define DRV_VERSION "2.1.1.12"
36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
39 39
40#define ENIC_WQ_MAX 8 40#define ENIC_WQ_MAX 1
41#define ENIC_RQ_MAX 8 41#define ENIC_RQ_MAX 1
42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
44 44
@@ -49,7 +49,7 @@ struct enic_msix_entry {
49 void *devid; 49 void *devid;
50}; 50};
51 51
52#define ENIC_SET_APPLIED (1 << 0) 52#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
53#define ENIC_SET_REQUEST (1 << 1) 53#define ENIC_SET_REQUEST (1 << 1)
54#define ENIC_SET_NAME (1 << 2) 54#define ENIC_SET_NAME (1 << 2)
55#define ENIC_SET_INSTANCE (1 << 3) 55#define ENIC_SET_INSTANCE (1 << 3)
@@ -101,7 +101,6 @@ struct enic {
101 /* receive queue cache line section */ 101 /* receive queue cache line section */
102 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; 102 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
103 unsigned int rq_count; 103 unsigned int rq_count;
104 int (*rq_alloc_buf)(struct vnic_rq *rq);
105 u64 rq_truncated_pkts; 104 u64 rq_truncated_pkts;
106 u64 rq_bad_fcs; 105 u64 rq_bad_fcs;
107 struct napi_struct napi[ENIC_RQ_MAX]; 106 struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644
index 000000000000..37ad3a1c82ee
--- /dev/null
+++ b/drivers/net/enic/enic_dev.c
@@ -0,0 +1,221 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/pci.h>
20#include <linux/etherdevice.h>
21
22#include "vnic_dev.h"
23#include "vnic_vic.h"
24#include "enic_res.h"
25#include "enic.h"
26#include "enic_dev.h"
27
28int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
29{
30 int err;
31
32 spin_lock(&enic->devcmd_lock);
33 err = vnic_dev_fw_info(enic->vdev, fw_info);
34 spin_unlock(&enic->devcmd_lock);
35
36 return err;
37}
38
39int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
40{
41 int err;
42
43 spin_lock(&enic->devcmd_lock);
44 err = vnic_dev_stats_dump(enic->vdev, vstats);
45 spin_unlock(&enic->devcmd_lock);
46
47 return err;
48}
49
50int enic_dev_add_station_addr(struct enic *enic)
51{
52 int err;
53
54 if (!is_valid_ether_addr(enic->netdev->dev_addr))
55 return -EADDRNOTAVAIL;
56
57 spin_lock(&enic->devcmd_lock);
58 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
59 spin_unlock(&enic->devcmd_lock);
60
61 return err;
62}
63
64int enic_dev_del_station_addr(struct enic *enic)
65{
66 int err;
67
68 if (!is_valid_ether_addr(enic->netdev->dev_addr))
69 return -EADDRNOTAVAIL;
70
71 spin_lock(&enic->devcmd_lock);
72 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
73 spin_unlock(&enic->devcmd_lock);
74
75 return err;
76}
77
78int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
79 int broadcast, int promisc, int allmulti)
80{
81 int err;
82
83 spin_lock(&enic->devcmd_lock);
84 err = vnic_dev_packet_filter(enic->vdev, directed,
85 multicast, broadcast, promisc, allmulti);
86 spin_unlock(&enic->devcmd_lock);
87
88 return err;
89}
90
91int enic_dev_add_addr(struct enic *enic, u8 *addr)
92{
93 int err;
94
95 spin_lock(&enic->devcmd_lock);
96 err = vnic_dev_add_addr(enic->vdev, addr);
97 spin_unlock(&enic->devcmd_lock);
98
99 return err;
100}
101
102int enic_dev_del_addr(struct enic *enic, u8 *addr)
103{
104 int err;
105
106 spin_lock(&enic->devcmd_lock);
107 err = vnic_dev_del_addr(enic->vdev, addr);
108 spin_unlock(&enic->devcmd_lock);
109
110 return err;
111}
112
113int enic_dev_notify_unset(struct enic *enic)
114{
115 int err;
116
117 spin_lock(&enic->devcmd_lock);
118 err = vnic_dev_notify_unset(enic->vdev);
119 spin_unlock(&enic->devcmd_lock);
120
121 return err;
122}
123
124int enic_dev_hang_notify(struct enic *enic)
125{
126 int err;
127
128 spin_lock(&enic->devcmd_lock);
129 err = vnic_dev_hang_notify(enic->vdev);
130 spin_unlock(&enic->devcmd_lock);
131
132 return err;
133}
134
135int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
136{
137 int err;
138
139 spin_lock(&enic->devcmd_lock);
140 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
141 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
142 spin_unlock(&enic->devcmd_lock);
143
144 return err;
145}
146
147int enic_dev_enable(struct enic *enic)
148{
149 int err;
150
151 spin_lock(&enic->devcmd_lock);
152 err = vnic_dev_enable_wait(enic->vdev);
153 spin_unlock(&enic->devcmd_lock);
154
155 return err;
156}
157
158int enic_dev_disable(struct enic *enic)
159{
160 int err;
161
162 spin_lock(&enic->devcmd_lock);
163 err = vnic_dev_disable(enic->vdev);
164 spin_unlock(&enic->devcmd_lock);
165
166 return err;
167}
168
169int enic_vnic_dev_deinit(struct enic *enic)
170{
171 int err;
172
173 spin_lock(&enic->devcmd_lock);
174 err = vnic_dev_deinit(enic->vdev);
175 spin_unlock(&enic->devcmd_lock);
176
177 return err;
178}
179
180int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
181{
182 int err;
183
184 spin_lock(&enic->devcmd_lock);
185 err = vnic_dev_init_prov(enic->vdev,
186 (u8 *)vp, vic_provinfo_size(vp));
187 spin_unlock(&enic->devcmd_lock);
188
189 return err;
190}
191
192int enic_dev_init_done(struct enic *enic, int *done, int *error)
193{
194 int err;
195
196 spin_lock(&enic->devcmd_lock);
197 err = vnic_dev_init_done(enic->vdev, done, error);
198 spin_unlock(&enic->devcmd_lock);
199
200 return err;
201}
202
203/* rtnl lock is held */
204void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
205{
206 struct enic *enic = netdev_priv(netdev);
207
208 spin_lock(&enic->devcmd_lock);
209 enic_add_vlan(enic, vid);
210 spin_unlock(&enic->devcmd_lock);
211}
212
213/* rtnl lock is held */
214void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
215{
216 struct enic *enic = netdev_priv(netdev);
217
218 spin_lock(&enic->devcmd_lock);
219 enic_del_vlan(enic, vid);
220 spin_unlock(&enic->devcmd_lock);
221}
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644
index 000000000000..495f57fcb887
--- /dev/null
+++ b/drivers/net/enic/enic_dev.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef _ENIC_DEV_H_
20#define _ENIC_DEV_H_
21
22int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
23int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
24int enic_dev_add_station_addr(struct enic *enic);
25int enic_dev_del_station_addr(struct enic *enic);
26int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
27 int broadcast, int promisc, int allmulti);
28int enic_dev_add_addr(struct enic *enic, u8 *addr);
29int enic_dev_del_addr(struct enic *enic, u8 *addr);
30void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
31void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
32int enic_dev_notify_unset(struct enic *enic);
33int enic_dev_hang_notify(struct enic *enic);
34int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
35int enic_dev_enable(struct enic *enic);
36int enic_dev_disable(struct enic *enic);
37int enic_vnic_dev_deinit(struct enic *enic);
38int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
39int enic_dev_init_done(struct enic *enic, int *done, int *error);
40
41#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a0af48c51fb3..8b9cad5e9712 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,6 +44,7 @@
44#include "vnic_vic.h" 44#include "vnic_vic.h"
45#include "enic_res.h" 45#include "enic_res.h"
46#include "enic.h" 46#include "enic.h"
47#include "enic_dev.h"
47 48
48#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) 49#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
49#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) 50#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
190 return 0; 191 return 0;
191} 192}
192 193
193static int enic_dev_fw_info(struct enic *enic,
194 struct vnic_devcmd_fw_info **fw_info)
195{
196 int err;
197
198 spin_lock(&enic->devcmd_lock);
199 err = vnic_dev_fw_info(enic->vdev, fw_info);
200 spin_unlock(&enic->devcmd_lock);
201
202 return err;
203}
204
205static void enic_get_drvinfo(struct net_device *netdev, 194static void enic_get_drvinfo(struct net_device *netdev,
206 struct ethtool_drvinfo *drvinfo) 195 struct ethtool_drvinfo *drvinfo)
207{ 196{
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
246 } 235 }
247} 236}
248 237
249static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
250{
251 int err;
252
253 spin_lock(&enic->devcmd_lock);
254 err = vnic_dev_stats_dump(enic->vdev, vstats);
255 spin_unlock(&enic->devcmd_lock);
256
257 return err;
258}
259
260static void enic_get_ethtool_stats(struct net_device *netdev, 238static void enic_get_ethtool_stats(struct net_device *netdev,
261 struct ethtool_stats *stats, u64 *data) 239 struct ethtool_stats *stats, u64 *data)
262{ 240{
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
896 return net_stats; 874 return net_stats;
897} 875}
898 876
899static void enic_reset_multicast_list(struct enic *enic) 877static void enic_reset_addr_lists(struct enic *enic)
900{ 878{
901 enic->mc_count = 0; 879 enic->mc_count = 0;
880 enic->uc_count = 0;
902 enic->flags = 0; 881 enic->flags = 0;
903} 882}
904 883
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
919 return 0; 898 return 0;
920} 899}
921 900
922static int enic_dev_add_station_addr(struct enic *enic)
923{
924 int err = 0;
925
926 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
927 spin_lock(&enic->devcmd_lock);
928 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
929 spin_unlock(&enic->devcmd_lock);
930 }
931
932 return err;
933}
934
935static int enic_dev_del_station_addr(struct enic *enic)
936{
937 int err = 0;
938
939 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
940 spin_lock(&enic->devcmd_lock);
941 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
942 spin_unlock(&enic->devcmd_lock);
943 }
944
945 return err;
946}
947
948static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) 901static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
949{ 902{
950 struct enic *enic = netdev_priv(netdev); 903 struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
989 return enic_dev_add_station_addr(enic); 942 return enic_dev_add_station_addr(enic);
990} 943}
991 944
992static int enic_dev_packet_filter(struct enic *enic, int directed, 945static void enic_update_multicast_addr_list(struct enic *enic)
993 int multicast, int broadcast, int promisc, int allmulti)
994{
995 int err;
996
997 spin_lock(&enic->devcmd_lock);
998 err = vnic_dev_packet_filter(enic->vdev, directed,
999 multicast, broadcast, promisc, allmulti);
1000 spin_unlock(&enic->devcmd_lock);
1001
1002 return err;
1003}
1004
1005static int enic_dev_add_addr(struct enic *enic, u8 *addr)
1006{
1007 int err;
1008
1009 spin_lock(&enic->devcmd_lock);
1010 err = vnic_dev_add_addr(enic->vdev, addr);
1011 spin_unlock(&enic->devcmd_lock);
1012
1013 return err;
1014}
1015
1016static int enic_dev_del_addr(struct enic *enic, u8 *addr)
1017{
1018 int err;
1019
1020 spin_lock(&enic->devcmd_lock);
1021 err = vnic_dev_del_addr(enic->vdev, addr);
1022 spin_unlock(&enic->devcmd_lock);
1023
1024 return err;
1025}
1026
1027static void enic_add_multicast_addr_list(struct enic *enic)
1028{ 946{
1029 struct net_device *netdev = enic->netdev; 947 struct net_device *netdev = enic->netdev;
1030 struct netdev_hw_addr *ha; 948 struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
1079 enic->mc_count = mc_count; 997 enic->mc_count = mc_count;
1080} 998}
1081 999
1082static void enic_add_unicast_addr_list(struct enic *enic) 1000static void enic_update_unicast_addr_list(struct enic *enic)
1083{ 1001{
1084 struct net_device *netdev = enic->netdev; 1002 struct net_device *netdev = enic->netdev;
1085 struct netdev_hw_addr *ha; 1003 struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
1156 } 1074 }
1157 1075
1158 if (!promisc) { 1076 if (!promisc) {
1159 enic_add_unicast_addr_list(enic); 1077 enic_update_unicast_addr_list(enic);
1160 if (!allmulti) 1078 if (!allmulti)
1161 enic_add_multicast_addr_list(enic); 1079 enic_update_multicast_addr_list(enic);
1162 } 1080 }
1163} 1081}
1164 1082
@@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
1170 enic->vlan_group = vlan_group; 1088 enic->vlan_group = vlan_group;
1171} 1089}
1172 1090
1173/* rtnl lock is held */
1174static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1175{
1176 struct enic *enic = netdev_priv(netdev);
1177
1178 spin_lock(&enic->devcmd_lock);
1179 enic_add_vlan(enic, vid);
1180 spin_unlock(&enic->devcmd_lock);
1181}
1182
1183/* rtnl lock is held */
1184static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1185{
1186 struct enic *enic = netdev_priv(netdev);
1187
1188 spin_lock(&enic->devcmd_lock);
1189 enic_del_vlan(enic, vid);
1190 spin_unlock(&enic->devcmd_lock);
1191}
1192
1193/* netif_tx_lock held, BHs disabled */ 1091/* netif_tx_lock held, BHs disabled */
1194static void enic_tx_timeout(struct net_device *netdev) 1092static void enic_tx_timeout(struct net_device *netdev)
1195{ 1093{
@@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
1197 schedule_work(&enic->reset); 1095 schedule_work(&enic->reset);
1198} 1096}
1199 1097
1200static int enic_vnic_dev_deinit(struct enic *enic)
1201{
1202 int err;
1203
1204 spin_lock(&enic->devcmd_lock);
1205 err = vnic_dev_deinit(enic->vdev);
1206 spin_unlock(&enic->devcmd_lock);
1207
1208 return err;
1209}
1210
1211static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
1212{
1213 int err;
1214
1215 spin_lock(&enic->devcmd_lock);
1216 err = vnic_dev_init_prov(enic->vdev,
1217 (u8 *)vp, vic_provinfo_size(vp));
1218 spin_unlock(&enic->devcmd_lock);
1219
1220 return err;
1221}
1222
1223static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1224{
1225 int err;
1226
1227 spin_lock(&enic->devcmd_lock);
1228 err = vnic_dev_init_done(enic->vdev, done, error);
1229 spin_unlock(&enic->devcmd_lock);
1230
1231 return err;
1232}
1233
1234static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 1098static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1235{ 1099{
1236 struct enic *enic = netdev_priv(netdev); 1100 struct enic *enic = netdev_priv(netdev);
@@ -1262,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1262 if (err) 1126 if (err)
1263 return err; 1127 return err;
1264 1128
1129 enic_reset_addr_lists(enic);
1130
1265 switch (enic->pp.request) { 1131 switch (enic->pp.request) {
1266 1132
1267 case PORT_REQUEST_ASSOCIATE: 1133 case PORT_REQUEST_ASSOCIATE:
@@ -1318,18 +1184,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1318 vic_provinfo_free(vp); 1184 vic_provinfo_free(vp);
1319 if (err) 1185 if (err)
1320 return err; 1186 return err;
1321
1322 enic->pp.set |= ENIC_SET_APPLIED;
1323 break; 1187 break;
1324 1188
1325 case PORT_REQUEST_DISASSOCIATE: 1189 case PORT_REQUEST_DISASSOCIATE:
1326 enic->pp.set &= ~ENIC_SET_APPLIED;
1327 break; 1190 break;
1328 1191
1329 default: 1192 default:
1330 return -EINVAL; 1193 return -EINVAL;
1331 } 1194 }
1332 1195
1196 /* Set flag to indicate that the port assoc/disassoc
1197 * request has been sent out to fw
1198 */
1199 enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
1200
1333 return 0; 1201 return 0;
1334} 1202}
1335 1203
@@ -1379,9 +1247,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1379 1247
1380 if (is_zero_ether_addr(netdev->dev_addr)) 1248 if (is_zero_ether_addr(netdev->dev_addr))
1381 random_ether_addr(netdev->dev_addr); 1249 random_ether_addr(netdev->dev_addr);
1382 } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
1383 if (!is_zero_ether_addr(enic->pp.mac_addr))
1384 enic_dev_del_addr(enic, enic->pp.mac_addr);
1385 } 1250 }
1386 1251
1387 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile)); 1252 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1255,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1390 if (err) 1255 if (err)
1391 goto set_port_profile_cleanup; 1256 goto set_port_profile_cleanup;
1392 1257
1393 if (!is_zero_ether_addr(enic->pp.mac_addr))
1394 enic_dev_add_addr(enic, enic->pp.mac_addr);
1395
1396set_port_profile_cleanup: 1258set_port_profile_cleanup:
1397 memset(enic->pp.vf_mac, 0, ETH_ALEN); 1259 memset(enic->pp.vf_mac, 0, ETH_ALEN);
1398 1260
@@ -1411,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1411 int err, error, done; 1273 int err, error, done;
1412 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1274 u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1413 1275
1414 if (!(enic->pp.set & ENIC_SET_APPLIED)) 1276 if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
1415 return -ENODATA; 1277 return -ENODATA;
1416 1278
1417 err = enic_dev_init_done(enic, &done, &error); 1279 err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
1489 return 0; 1351 return 0;
1490} 1352}
1491 1353
1492static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
1493{
1494 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
1495
1496 if (vnic_rq_posting_soon(rq)) {
1497
1498 /* SW workaround for A0 HW erratum: if we're just about
1499 * to write posted_index, insert a dummy desc
1500 * of type resvd
1501 */
1502
1503 rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
1504 vnic_rq_post(rq, 0, 0, 0, 0);
1505 } else {
1506 return enic_rq_alloc_buf(rq);
1507 }
1508
1509 return 0;
1510}
1511
1512static int enic_dev_hw_version(struct enic *enic,
1513 enum vnic_dev_hw_version *hw_ver)
1514{
1515 int err;
1516
1517 spin_lock(&enic->devcmd_lock);
1518 err = vnic_dev_hw_version(enic->vdev, hw_ver);
1519 spin_unlock(&enic->devcmd_lock);
1520
1521 return err;
1522}
1523
1524static int enic_set_rq_alloc_buf(struct enic *enic)
1525{
1526 enum vnic_dev_hw_version hw_ver;
1527 int err;
1528
1529 err = enic_dev_hw_version(enic, &hw_ver);
1530 if (err)
1531 return err;
1532
1533 switch (hw_ver) {
1534 case VNIC_DEV_HW_VER_A1:
1535 enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
1536 break;
1537 case VNIC_DEV_HW_VER_A2:
1538 case VNIC_DEV_HW_VER_UNKNOWN:
1539 enic->rq_alloc_buf = enic_rq_alloc_buf;
1540 break;
1541 default:
1542 return -ENODEV;
1543 }
1544
1545 return 0;
1546}
1547
1548static void enic_rq_indicate_buf(struct vnic_rq *rq, 1354static void enic_rq_indicate_buf(struct vnic_rq *rq,
1549 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 1355 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1550 int skipped, void *opaque) 1356 int skipped, void *opaque)
@@ -1681,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1681 0 /* don't unmask intr */, 1487 0 /* don't unmask intr */,
1682 0 /* don't reset intr timer */); 1488 0 /* don't reset intr timer */);
1683 1489
1684 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); 1490 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1685 1491
1686 /* Buffer allocation failed. Stay in polling 1492 /* Buffer allocation failed. Stay in polling
1687 * mode so we can try to fill the ring again. 1493 * mode so we can try to fill the ring again.
@@ -1731,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1731 0 /* don't unmask intr */, 1537 0 /* don't unmask intr */,
1732 0 /* don't reset intr timer */); 1538 0 /* don't reset intr timer */);
1733 1539
1734 err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf); 1540 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
1735 1541
1736 /* Buffer allocation failed. Stay in polling mode 1542 /* Buffer allocation failed. Stay in polling mode
1737 * so we can try to fill the ring again. 1543 * so we can try to fill the ring again.
@@ -1901,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic)
1901 return err; 1707 return err;
1902} 1708}
1903 1709
1904static int enic_dev_notify_unset(struct enic *enic)
1905{
1906 int err;
1907
1908 spin_lock(&enic->devcmd_lock);
1909 err = vnic_dev_notify_unset(enic->vdev);
1910 spin_unlock(&enic->devcmd_lock);
1911
1912 return err;
1913}
1914
1915static int enic_dev_enable(struct enic *enic)
1916{
1917 int err;
1918
1919 spin_lock(&enic->devcmd_lock);
1920 err = vnic_dev_enable_wait(enic->vdev);
1921 spin_unlock(&enic->devcmd_lock);
1922
1923 return err;
1924}
1925
1926static int enic_dev_disable(struct enic *enic)
1927{
1928 int err;
1929
1930 spin_lock(&enic->devcmd_lock);
1931 err = vnic_dev_disable(enic->vdev);
1932 spin_unlock(&enic->devcmd_lock);
1933
1934 return err;
1935}
1936
1937static void enic_notify_timer_start(struct enic *enic) 1710static void enic_notify_timer_start(struct enic *enic)
1938{ 1711{
1939 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1712 switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1740,7 @@ static int enic_open(struct net_device *netdev)
1967 } 1740 }
1968 1741
1969 for (i = 0; i < enic->rq_count; i++) { 1742 for (i = 0; i < enic->rq_count; i++) {
1970 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); 1743 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1971 /* Need at least one buffer on ring to get going */ 1744 /* Need at least one buffer on ring to get going */
1972 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1745 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1973 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1746 netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
2285 rss_hash_bits, rss_base_cpu, rss_enable); 2058 rss_hash_bits, rss_base_cpu, rss_enable);
2286} 2059}
2287 2060
2288static int enic_dev_hang_notify(struct enic *enic)
2289{
2290 int err;
2291
2292 spin_lock(&enic->devcmd_lock);
2293 err = vnic_dev_hang_notify(enic->vdev);
2294 spin_unlock(&enic->devcmd_lock);
2295
2296 return err;
2297}
2298
2299static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
2300{
2301 int err;
2302
2303 spin_lock(&enic->devcmd_lock);
2304 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
2305 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
2306 spin_unlock(&enic->devcmd_lock);
2307
2308 return err;
2309}
2310
2311static void enic_reset(struct work_struct *work) 2061static void enic_reset(struct work_struct *work)
2312{ 2062{
2313 struct enic *enic = container_of(work, struct enic, reset); 2063 struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2070,7 @@ static void enic_reset(struct work_struct *work)
2320 enic_dev_hang_notify(enic); 2070 enic_dev_hang_notify(enic);
2321 enic_stop(enic->netdev); 2071 enic_stop(enic->netdev);
2322 enic_dev_hang_reset(enic); 2072 enic_dev_hang_reset(enic);
2323 enic_reset_multicast_list(enic); 2073 enic_reset_addr_lists(enic);
2324 enic_init_vnic_resources(enic); 2074 enic_init_vnic_resources(enic);
2325 enic_set_rss_nic_cfg(enic); 2075 enic_set_rss_nic_cfg(enic);
2326 enic_dev_set_ig_vlan_rewrite_mode(enic); 2076 enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2082,7 @@ static void enic_reset(struct work_struct *work)
2332static int enic_set_intr_mode(struct enic *enic) 2082static int enic_set_intr_mode(struct enic *enic)
2333{ 2083{
2334 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); 2084 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2335 unsigned int m = 1; 2085 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
2336 unsigned int i; 2086 unsigned int i;
2337 2087
2338 /* Set interrupt mode (INTx, MSI, MSI-X) depending 2088 /* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
2475 .ndo_tx_timeout = enic_tx_timeout, 2225 .ndo_tx_timeout = enic_tx_timeout,
2476 .ndo_set_vf_port = enic_set_vf_port, 2226 .ndo_set_vf_port = enic_set_vf_port,
2477 .ndo_get_vf_port = enic_get_vf_port, 2227 .ndo_get_vf_port = enic_get_vf_port,
2478#ifdef IFLA_VF_MAX
2479 .ndo_set_vf_mac = enic_set_vf_mac, 2228 .ndo_set_vf_mac = enic_set_vf_mac,
2480#endif
2481#ifdef CONFIG_NET_POLL_CONTROLLER 2229#ifdef CONFIG_NET_POLL_CONTROLLER
2482 .ndo_poll_controller = enic_poll_controller, 2230 .ndo_poll_controller = enic_poll_controller,
2483#endif 2231#endif
@@ -2556,25 +2304,12 @@ static int enic_dev_init(struct enic *enic)
2556 2304
2557 enic_init_vnic_resources(enic); 2305 enic_init_vnic_resources(enic);
2558 2306
2559 err = enic_set_rq_alloc_buf(enic);
2560 if (err) {
2561 dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
2562 goto err_out_free_vnic_resources;
2563 }
2564
2565 err = enic_set_rss_nic_cfg(enic); 2307 err = enic_set_rss_nic_cfg(enic);
2566 if (err) { 2308 if (err) {
2567 dev_err(dev, "Failed to config nic, aborting\n"); 2309 dev_err(dev, "Failed to config nic, aborting\n");
2568 goto err_out_free_vnic_resources; 2310 goto err_out_free_vnic_resources;
2569 } 2311 }
2570 2312
2571 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2572 if (err) {
2573 dev_err(dev,
2574 "Failed to set ingress vlan rewrite mode, aborting.\n");
2575 goto err_out_free_vnic_resources;
2576 }
2577
2578 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2313 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2579 default: 2314 default:
2580 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); 2315 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2448,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2713 goto err_out_vnic_unregister; 2448 goto err_out_vnic_unregister;
2714 } 2449 }
2715 2450
2451 /* Setup devcmd lock
2452 */
2453
2454 spin_lock_init(&enic->devcmd_lock);
2455
2456 /*
2457 * Set ingress vlan rewrite mode before vnic initialization
2458 */
2459
2460 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2461 if (err) {
2462 dev_err(dev,
2463 "Failed to set ingress vlan rewrite mode, aborting.\n");
2464 goto err_out_dev_close;
2465 }
2466
2716 /* Issue device init to initialize the vnic-to-switch link. 2467 /* Issue device init to initialize the vnic-to-switch link.
2717 * We'll start with carrier off and wait for link UP 2468 * We'll start with carrier off and wait for link UP
2718 * notification later to turn on carrier. We don't need 2469 * notification later to turn on carrier. We don't need
@@ -2736,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2736 } 2487 }
2737 } 2488 }
2738 2489
2739 /* Setup devcmd lock
2740 */
2741
2742 spin_lock_init(&enic->devcmd_lock);
2743
2744 err = enic_dev_init(enic); 2490 err = enic_dev_init(enic);
2745 if (err) { 2491 if (err) {
2746 dev_err(dev, "Device initialization failed, aborting\n"); 2492 dev_err(dev, "Device initialization failed, aborting\n");
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index fb35d8b17668..c089b362a36f 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -408,10 +408,17 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
408 if (!vdev->fw_info) 408 if (!vdev->fw_info)
409 return -ENOMEM; 409 return -ENOMEM;
410 410
411 memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));
412
411 a0 = vdev->fw_info_pa; 413 a0 = vdev->fw_info_pa;
414 a1 = sizeof(struct vnic_devcmd_fw_info);
412 415
413 /* only get fw_info once and cache it */ 416 /* only get fw_info once and cache it */
414 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); 417 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
418 if (err == ERR_ECMDUNKNOWN) {
419 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
420 &a0, &a1, wait);
421 }
415 } 422 }
416 423
417 *fw_info = vdev->fw_info; 424 *fw_info = vdev->fw_info;
@@ -419,25 +426,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
419 return err; 426 return err;
420} 427}
421 428
422int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
423{
424 struct vnic_devcmd_fw_info *fw_info;
425 int err;
426
427 err = vnic_dev_fw_info(vdev, &fw_info);
428 if (err)
429 return err;
430
431 if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
432 *hw_ver = VNIC_DEV_HW_VER_A1;
433 else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
434 *hw_ver = VNIC_DEV_HW_VER_A2;
435 else
436 *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
437
438 return 0;
439}
440
441int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 429int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
442 void *value) 430 void *value)
443{ 431{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 05f9a24cd459..e837546213a8 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
44#undef pr_fmt 44#undef pr_fmt
45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46 46
47enum vnic_dev_hw_version {
48 VNIC_DEV_HW_VER_UNKNOWN,
49 VNIC_DEV_HW_VER_A1,
50 VNIC_DEV_HW_VER_A2,
51};
52
53enum vnic_dev_intr_mode { 47enum vnic_dev_intr_mode {
54 VNIC_DEV_INTR_MODE_UNKNOWN, 48 VNIC_DEV_INTR_MODE_UNKNOWN,
55 VNIC_DEV_INTR_MODE_INTX, 49 VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
93 u64 *a0, u64 *a1, int wait); 87 u64 *a0, u64 *a1, int wait);
94int vnic_dev_fw_info(struct vnic_dev *vdev, 88int vnic_dev_fw_info(struct vnic_dev *vdev,
95 struct vnic_devcmd_fw_info **fw_info); 89 struct vnic_devcmd_fw_info **fw_info);
96int vnic_dev_hw_version(struct vnic_dev *vdev,
97 enum vnic_dev_hw_version *hw_ver);
98int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 90int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
99 void *value); 91 void *value);
100int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); 92int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index 9abb3d51dea1..d833a071bac5 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -80,8 +80,34 @@
80enum vnic_devcmd_cmd { 80enum vnic_devcmd_cmd {
81 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), 81 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
82 82
83 /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ 83 /*
84 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), 84 * mcpu fw info in mem:
85 * in:
86 * (u64)a0=paddr to struct vnic_devcmd_fw_info
87 * action:
88 * Fills in struct vnic_devcmd_fw_info (128 bytes)
89 * note:
90 * An old definition of CMD_MCPU_FW_INFO
91 */
92 CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
93
94 /*
95 * mcpu fw info in mem:
96 * in:
97 * (u64)a0=paddr to struct vnic_devcmd_fw_info
98 * (u16)a1=size of the structure
99 * out:
100 * (u16)a1=0 for in:a1 = 0,
101 * data size actually written for other values.
102 * action:
103 * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
104 * first in:a1 bytes for 0 < in:a1 <= 132,
105 * 132 bytes for other values of in:a1.
106 * note:
107 * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1
108 * for source compatibility.
109 */
110 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
85 111
86 /* dev-specific block member: 112 /* dev-specific block member:
87 * in: (u16)a0=offset,(u8)a1=size 113 * in: (u16)a0=offset,(u8)a1=size
@@ -291,11 +317,19 @@ enum vnic_devcmd_error {
291 ERR_EMAXRES = 10, 317 ERR_EMAXRES = 10,
292}; 318};
293 319
320/*
321 * note: hw_version and asic_rev refer to the same thing,
322 * but have different formats. hw_version is
323 * a 32-byte string (e.g. "A2") and asic_rev is
324 * a 16-bit integer (e.g. 0xA2).
325 */
294struct vnic_devcmd_fw_info { 326struct vnic_devcmd_fw_info {
295 char fw_version[32]; 327 char fw_version[32];
296 char fw_build[32]; 328 char fw_build[32];
297 char hw_version[32]; 329 char hw_version[32];
298 char hw_serial_number[32]; 330 char hw_serial_number[32];
331 u16 asic_type;
332 u16 asic_rev;
299}; 333};
300 334
301struct vnic_devcmd_notify { 335struct vnic_devcmd_notify {
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 37f08de2454a..2056586f4d4b 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
141 } 141 }
142} 142}
143 143
144static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
145{
146 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
147}
148
149static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) 144static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
150{ 145{
151 rq->ring.desc_avail += count; 146 rq->ring.desc_avail += count;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 0cb1cf9cf4b0..a59cf961a436 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -111,6 +111,8 @@
111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM 111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
112 */ 112 */
113 113
114#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
115
114#include <linux/capability.h> 116#include <linux/capability.h>
115#include <linux/module.h> 117#include <linux/module.h>
116#include <linux/kernel.h> 118#include <linux/kernel.h>
@@ -162,7 +164,7 @@ static void eql_timer(unsigned long param)
162} 164}
163 165
164static const char version[] __initconst = 166static const char version[] __initconst =
165 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n"; 167 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
166 168
167static const struct net_device_ops eql_netdev_ops = { 169static const struct net_device_ops eql_netdev_ops = {
168 .ndo_open = eql_open, 170 .ndo_open = eql_open,
@@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev)
204 equalizer_t *eql = netdev_priv(dev); 206 equalizer_t *eql = netdev_priv(dev);
205 207
206 /* XXX We should force this off automatically for the user. */ 208 /* XXX We should force this off automatically for the user. */
207 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " 209 netdev_info(dev,
208 "your slave devices.\n", dev->name); 210 "remember to turn off Van-Jacobson compression on your slave devices\n");
209 211
210 BUG_ON(!list_empty(&eql->queue.all_slaves)); 212 BUG_ON(!list_empty(&eql->queue.all_slaves));
211 213
@@ -591,7 +593,7 @@ static int __init eql_init_module(void)
591{ 593{
592 int err; 594 int err;
593 595
594 printk(version); 596 pr_info("%s\n", version);
595 597
596 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup); 598 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
597 if (!dev_eql) 599 if (!dev_eql)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cd0282d5d40f..885d8baff7d5 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -54,7 +54,7 @@
54 54
55#include "fec.h" 55#include "fec.h"
56 56
57#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 57#if defined(CONFIG_ARM)
58#define FEC_ALIGNMENT 0xf 58#define FEC_ALIGNMENT 0xf
59#else 59#else
60#define FEC_ALIGNMENT 0x3 60#define FEC_ALIGNMENT 0x3
@@ -148,8 +148,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
148 * account when setting it. 148 * account when setting it.
149 */ 149 */
150#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 150#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
151 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 151 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
152 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
153#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 152#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
154#else 153#else
155#define OPT_FRAME_SIZE 0 154#define OPT_FRAME_SIZE 0
@@ -184,7 +183,7 @@ struct fec_enet_private {
184 struct bufdesc *rx_bd_base; 183 struct bufdesc *rx_bd_base;
185 struct bufdesc *tx_bd_base; 184 struct bufdesc *tx_bd_base;
186 /* The next free ring entry */ 185 /* The next free ring entry */
187 struct bufdesc *cur_rx, *cur_tx; 186 struct bufdesc *cur_rx, *cur_tx;
188 /* The ring entries to be free()ed */ 187 /* The ring entries to be free()ed */
189 struct bufdesc *dirty_tx; 188 struct bufdesc *dirty_tx;
190 189
@@ -192,28 +191,21 @@ struct fec_enet_private {
192 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ 191 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
193 spinlock_t hw_lock; 192 spinlock_t hw_lock;
194 193
195 struct platform_device *pdev; 194 struct platform_device *pdev;
196 195
197 int opened; 196 int opened;
198 197
199 /* Phylib and MDIO interface */ 198 /* Phylib and MDIO interface */
200 struct mii_bus *mii_bus; 199 struct mii_bus *mii_bus;
201 struct phy_device *phy_dev; 200 struct phy_device *phy_dev;
202 int mii_timeout; 201 int mii_timeout;
203 uint phy_speed; 202 uint phy_speed;
204 phy_interface_t phy_interface; 203 phy_interface_t phy_interface;
205 int link; 204 int link;
206 int full_duplex; 205 int full_duplex;
207 struct completion mdio_done; 206 struct completion mdio_done;
208}; 207};
209 208
210static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
211static void fec_enet_tx(struct net_device *dev);
212static void fec_enet_rx(struct net_device *dev);
213static int fec_enet_close(struct net_device *dev);
214static void fec_restart(struct net_device *dev, int duplex);
215static void fec_stop(struct net_device *dev);
216
217/* FEC MII MMFR bits definition */ 209/* FEC MII MMFR bits definition */
218#define FEC_MMFR_ST (1 << 30) 210#define FEC_MMFR_ST (1 << 30)
219#define FEC_MMFR_OP_READ (2 << 28) 211#define FEC_MMFR_OP_READ (2 << 28)
@@ -240,9 +232,9 @@ static void *swap_buffer(void *bufaddr, int len)
240} 232}
241 233
242static netdev_tx_t 234static netdev_tx_t
243fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 235fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
244{ 236{
245 struct fec_enet_private *fep = netdev_priv(dev); 237 struct fec_enet_private *fep = netdev_priv(ndev);
246 const struct platform_device_id *id_entry = 238 const struct platform_device_id *id_entry =
247 platform_get_device_id(fep->pdev); 239 platform_get_device_id(fep->pdev);
248 struct bufdesc *bdp; 240 struct bufdesc *bdp;
@@ -263,9 +255,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
263 255
264 if (status & BD_ENET_TX_READY) { 256 if (status & BD_ENET_TX_READY) {
265 /* Ooops. All transmit buffers are full. Bail out. 257 /* Ooops. All transmit buffers are full. Bail out.
266 * This should not happen, since dev->tbusy should be set. 258 * This should not happen, since ndev->tbusy should be set.
267 */ 259 */
268 printk("%s: tx queue full!.\n", dev->name); 260 printk("%s: tx queue full!.\n", ndev->name);
269 spin_unlock_irqrestore(&fep->hw_lock, flags); 261 spin_unlock_irqrestore(&fep->hw_lock, flags);
270 return NETDEV_TX_BUSY; 262 return NETDEV_TX_BUSY;
271 } 263 }
@@ -285,7 +277,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
285 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 277 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
286 unsigned int index; 278 unsigned int index;
287 index = bdp - fep->tx_bd_base; 279 index = bdp - fep->tx_bd_base;
288 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); 280 memcpy(fep->tx_bounce[index], skb->data, skb->len);
289 bufaddr = fep->tx_bounce[index]; 281 bufaddr = fep->tx_bounce[index];
290 } 282 }
291 283
@@ -300,13 +292,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
300 /* Save skb pointer */ 292 /* Save skb pointer */
301 fep->tx_skbuff[fep->skb_cur] = skb; 293 fep->tx_skbuff[fep->skb_cur] = skb;
302 294
303 dev->stats.tx_bytes += skb->len; 295 ndev->stats.tx_bytes += skb->len;
304 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; 296 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
305 297
306 /* Push the data cache so the CPM does not get stale memory 298 /* Push the data cache so the CPM does not get stale memory
307 * data. 299 * data.
308 */ 300 */
309 bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr, 301 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
310 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 302 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
311 303
312 /* Send it on its way. Tell FEC it's ready, interrupt when done, 304 /* Send it on its way. Tell FEC it's ready, interrupt when done,
@@ -327,7 +319,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
327 319
328 if (bdp == fep->dirty_tx) { 320 if (bdp == fep->dirty_tx) {
329 fep->tx_full = 1; 321 fep->tx_full = 1;
330 netif_stop_queue(dev); 322 netif_stop_queue(ndev);
331 } 323 }
332 324
333 fep->cur_tx = bdp; 325 fep->cur_tx = bdp;
@@ -337,62 +329,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
337 return NETDEV_TX_OK; 329 return NETDEV_TX_OK;
338} 330}
339 331
332/* This function is called to start or restart the FEC during a link
333 * change. This only happens when switching between half and full
334 * duplex.
335 */
340static void 336static void
341fec_timeout(struct net_device *dev) 337fec_restart(struct net_device *ndev, int duplex)
342{ 338{
343 struct fec_enet_private *fep = netdev_priv(dev); 339 struct fec_enet_private *fep = netdev_priv(ndev);
340 const struct platform_device_id *id_entry =
341 platform_get_device_id(fep->pdev);
342 int i;
343 u32 temp_mac[2];
344 u32 rcntl = OPT_FRAME_SIZE | 0x04;
344 345
345 dev->stats.tx_errors++; 346 /* Whack a reset. We should wait for this. */
347 writel(1, fep->hwp + FEC_ECNTRL);
348 udelay(10);
346 349
347 fec_restart(dev, fep->full_duplex); 350 /*
348 netif_wake_queue(dev); 351 * enet-mac reset will reset mac address registers too,
349} 352 * so need to reconfigure it.
353 */
354 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
355 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
356 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
357 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
358 }
350 359
351static irqreturn_t 360 /* Clear any outstanding interrupt. */
352fec_enet_interrupt(int irq, void * dev_id) 361 writel(0xffc00000, fep->hwp + FEC_IEVENT);
353{
354 struct net_device *dev = dev_id;
355 struct fec_enet_private *fep = netdev_priv(dev);
356 uint int_events;
357 irqreturn_t ret = IRQ_NONE;
358 362
359 do { 363 /* Reset all multicast. */
360 int_events = readl(fep->hwp + FEC_IEVENT); 364 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
361 writel(int_events, fep->hwp + FEC_IEVENT); 365 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
366#ifndef CONFIG_M5272
367 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
368 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
369#endif
362 370
363 if (int_events & FEC_ENET_RXF) { 371 /* Set maximum receive buffer size. */
364 ret = IRQ_HANDLED; 372 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
365 fec_enet_rx(dev);
366 }
367 373
368 /* Transmit OK, or non-fatal error. Update the buffer 374 /* Set receive and transmit descriptor base. */
369 * descriptors. FEC handles all errors, we just discover 375 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
370 * them as part of the transmit process. 376 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
371 */ 377 fep->hwp + FEC_X_DES_START);
372 if (int_events & FEC_ENET_TXF) { 378
373 ret = IRQ_HANDLED; 379 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
374 fec_enet_tx(dev); 380 fep->cur_rx = fep->rx_bd_base;
381
382 /* Reset SKB transmit buffers. */
383 fep->skb_cur = fep->skb_dirty = 0;
384 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
385 if (fep->tx_skbuff[i]) {
386 dev_kfree_skb_any(fep->tx_skbuff[i]);
387 fep->tx_skbuff[i] = NULL;
375 } 388 }
389 }
376 390
377 if (int_events & FEC_ENET_MII) { 391 /* Enable MII mode */
378 ret = IRQ_HANDLED; 392 if (duplex) {
379 complete(&fep->mdio_done); 393 /* FD enable */
394 writel(0x04, fep->hwp + FEC_X_CNTRL);
395 } else {
396 /* No Rcv on Xmit */
397 rcntl |= 0x02;
398 writel(0x0, fep->hwp + FEC_X_CNTRL);
399 }
400
401 fep->full_duplex = duplex;
402
403 /* Set MII speed */
404 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
405
406 /*
407 * The phy interface and speed need to get configured
408 * differently on enet-mac.
409 */
410 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
411 /* Enable flow control and length check */
412 rcntl |= 0x40000000 | 0x00000020;
413
414 /* MII or RMII */
415 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
416 rcntl |= (1 << 8);
417 else
418 rcntl &= ~(1 << 8);
419
420 /* 10M or 100M */
421 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
422 rcntl &= ~(1 << 9);
423 else
424 rcntl |= (1 << 9);
425
426 } else {
427#ifdef FEC_MIIGSK_ENR
428 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
429 /* disable the gasket and wait */
430 writel(0, fep->hwp + FEC_MIIGSK_ENR);
431 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
432 udelay(1);
433
434 /*
435 * configure the gasket:
436 * RMII, 50 MHz, no loopback, no echo
437 */
438 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
439
440 /* re-enable the gasket */
441 writel(2, fep->hwp + FEC_MIIGSK_ENR);
380 } 442 }
381 } while (int_events); 443#endif
444 }
445 writel(rcntl, fep->hwp + FEC_R_CNTRL);
382 446
383 return ret; 447 /* And last, enable the transmit and receive processing */
448 writel(2, fep->hwp + FEC_ECNTRL);
449 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
450
451 /* Enable interrupts we wish to service */
452 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
453}
454
455static void
456fec_stop(struct net_device *ndev)
457{
458 struct fec_enet_private *fep = netdev_priv(ndev);
459
460 /* We cannot expect a graceful transmit stop without link !!! */
461 if (fep->link) {
462 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
463 udelay(10);
464 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
465 printk("fec_stop : Graceful transmit stop did not complete !\n");
466 }
467
468 /* Whack a reset. We should wait for this. */
469 writel(1, fep->hwp + FEC_ECNTRL);
470 udelay(10);
471 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
472 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
384} 473}
385 474
386 475
387static void 476static void
388fec_enet_tx(struct net_device *dev) 477fec_timeout(struct net_device *ndev)
478{
479 struct fec_enet_private *fep = netdev_priv(ndev);
480
481 ndev->stats.tx_errors++;
482
483 fec_restart(ndev, fep->full_duplex);
484 netif_wake_queue(ndev);
485}
486
487static void
488fec_enet_tx(struct net_device *ndev)
389{ 489{
390 struct fec_enet_private *fep; 490 struct fec_enet_private *fep;
391 struct bufdesc *bdp; 491 struct bufdesc *bdp;
392 unsigned short status; 492 unsigned short status;
393 struct sk_buff *skb; 493 struct sk_buff *skb;
394 494
395 fep = netdev_priv(dev); 495 fep = netdev_priv(ndev);
396 spin_lock(&fep->hw_lock); 496 spin_lock(&fep->hw_lock);
397 bdp = fep->dirty_tx; 497 bdp = fep->dirty_tx;
398 498
@@ -400,7 +500,8 @@ fec_enet_tx(struct net_device *dev)
400 if (bdp == fep->cur_tx && fep->tx_full == 0) 500 if (bdp == fep->cur_tx && fep->tx_full == 0)
401 break; 501 break;
402 502
403 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 503 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
504 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
404 bdp->cbd_bufaddr = 0; 505 bdp->cbd_bufaddr = 0;
405 506
406 skb = fep->tx_skbuff[fep->skb_dirty]; 507 skb = fep->tx_skbuff[fep->skb_dirty];
@@ -408,19 +509,19 @@ fec_enet_tx(struct net_device *dev)
408 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 509 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
409 BD_ENET_TX_RL | BD_ENET_TX_UN | 510 BD_ENET_TX_RL | BD_ENET_TX_UN |
410 BD_ENET_TX_CSL)) { 511 BD_ENET_TX_CSL)) {
411 dev->stats.tx_errors++; 512 ndev->stats.tx_errors++;
412 if (status & BD_ENET_TX_HB) /* No heartbeat */ 513 if (status & BD_ENET_TX_HB) /* No heartbeat */
413 dev->stats.tx_heartbeat_errors++; 514 ndev->stats.tx_heartbeat_errors++;
414 if (status & BD_ENET_TX_LC) /* Late collision */ 515 if (status & BD_ENET_TX_LC) /* Late collision */
415 dev->stats.tx_window_errors++; 516 ndev->stats.tx_window_errors++;
416 if (status & BD_ENET_TX_RL) /* Retrans limit */ 517 if (status & BD_ENET_TX_RL) /* Retrans limit */
417 dev->stats.tx_aborted_errors++; 518 ndev->stats.tx_aborted_errors++;
418 if (status & BD_ENET_TX_UN) /* Underrun */ 519 if (status & BD_ENET_TX_UN) /* Underrun */
419 dev->stats.tx_fifo_errors++; 520 ndev->stats.tx_fifo_errors++;
420 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 521 if (status & BD_ENET_TX_CSL) /* Carrier lost */
421 dev->stats.tx_carrier_errors++; 522 ndev->stats.tx_carrier_errors++;
422 } else { 523 } else {
423 dev->stats.tx_packets++; 524 ndev->stats.tx_packets++;
424 } 525 }
425 526
426 if (status & BD_ENET_TX_READY) 527 if (status & BD_ENET_TX_READY)
@@ -430,7 +531,7 @@ fec_enet_tx(struct net_device *dev)
430 * but we eventually sent the packet OK. 531 * but we eventually sent the packet OK.
431 */ 532 */
432 if (status & BD_ENET_TX_DEF) 533 if (status & BD_ENET_TX_DEF)
433 dev->stats.collisions++; 534 ndev->stats.collisions++;
434 535
435 /* Free the sk buffer associated with this last transmit */ 536 /* Free the sk buffer associated with this last transmit */
436 dev_kfree_skb_any(skb); 537 dev_kfree_skb_any(skb);
@@ -447,8 +548,8 @@ fec_enet_tx(struct net_device *dev)
447 */ 548 */
448 if (fep->tx_full) { 549 if (fep->tx_full) {
449 fep->tx_full = 0; 550 fep->tx_full = 0;
450 if (netif_queue_stopped(dev)) 551 if (netif_queue_stopped(ndev))
451 netif_wake_queue(dev); 552 netif_wake_queue(ndev);
452 } 553 }
453 } 554 }
454 fep->dirty_tx = bdp; 555 fep->dirty_tx = bdp;
@@ -462,9 +563,9 @@ fec_enet_tx(struct net_device *dev)
462 * effectively tossing the packet. 563 * effectively tossing the packet.
463 */ 564 */
464static void 565static void
465fec_enet_rx(struct net_device *dev) 566fec_enet_rx(struct net_device *ndev)
466{ 567{
467 struct fec_enet_private *fep = netdev_priv(dev); 568 struct fec_enet_private *fep = netdev_priv(ndev);
468 const struct platform_device_id *id_entry = 569 const struct platform_device_id *id_entry =
469 platform_get_device_id(fep->pdev); 570 platform_get_device_id(fep->pdev);
470 struct bufdesc *bdp; 571 struct bufdesc *bdp;
@@ -498,17 +599,17 @@ fec_enet_rx(struct net_device *dev)
498 /* Check for errors. */ 599 /* Check for errors. */
499 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 600 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
500 BD_ENET_RX_CR | BD_ENET_RX_OV)) { 601 BD_ENET_RX_CR | BD_ENET_RX_OV)) {
501 dev->stats.rx_errors++; 602 ndev->stats.rx_errors++;
502 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { 603 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
503 /* Frame too long or too short. */ 604 /* Frame too long or too short. */
504 dev->stats.rx_length_errors++; 605 ndev->stats.rx_length_errors++;
505 } 606 }
506 if (status & BD_ENET_RX_NO) /* Frame alignment */ 607 if (status & BD_ENET_RX_NO) /* Frame alignment */
507 dev->stats.rx_frame_errors++; 608 ndev->stats.rx_frame_errors++;
508 if (status & BD_ENET_RX_CR) /* CRC Error */ 609 if (status & BD_ENET_RX_CR) /* CRC Error */
509 dev->stats.rx_crc_errors++; 610 ndev->stats.rx_crc_errors++;
510 if (status & BD_ENET_RX_OV) /* FIFO overrun */ 611 if (status & BD_ENET_RX_OV) /* FIFO overrun */
511 dev->stats.rx_fifo_errors++; 612 ndev->stats.rx_fifo_errors++;
512 } 613 }
513 614
514 /* Report late collisions as a frame error. 615 /* Report late collisions as a frame error.
@@ -516,19 +617,19 @@ fec_enet_rx(struct net_device *dev)
516 * have in the buffer. So, just drop this frame on the floor. 617 * have in the buffer. So, just drop this frame on the floor.
517 */ 618 */
518 if (status & BD_ENET_RX_CL) { 619 if (status & BD_ENET_RX_CL) {
519 dev->stats.rx_errors++; 620 ndev->stats.rx_errors++;
520 dev->stats.rx_frame_errors++; 621 ndev->stats.rx_frame_errors++;
521 goto rx_processing_done; 622 goto rx_processing_done;
522 } 623 }
523 624
524 /* Process the incoming frame. */ 625 /* Process the incoming frame. */
525 dev->stats.rx_packets++; 626 ndev->stats.rx_packets++;
526 pkt_len = bdp->cbd_datlen; 627 pkt_len = bdp->cbd_datlen;
527 dev->stats.rx_bytes += pkt_len; 628 ndev->stats.rx_bytes += pkt_len;
528 data = (__u8*)__va(bdp->cbd_bufaddr); 629 data = (__u8*)__va(bdp->cbd_bufaddr);
529 630
530 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, 631 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
531 DMA_FROM_DEVICE); 632 FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
532 633
533 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 634 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
534 swap_buffer(data, pkt_len); 635 swap_buffer(data, pkt_len);
@@ -542,18 +643,18 @@ fec_enet_rx(struct net_device *dev)
542 643
543 if (unlikely(!skb)) { 644 if (unlikely(!skb)) {
544 printk("%s: Memory squeeze, dropping packet.\n", 645 printk("%s: Memory squeeze, dropping packet.\n",
545 dev->name); 646 ndev->name);
546 dev->stats.rx_dropped++; 647 ndev->stats.rx_dropped++;
547 } else { 648 } else {
548 skb_reserve(skb, NET_IP_ALIGN); 649 skb_reserve(skb, NET_IP_ALIGN);
549 skb_put(skb, pkt_len - 4); /* Make room */ 650 skb_put(skb, pkt_len - 4); /* Make room */
550 skb_copy_to_linear_data(skb, data, pkt_len - 4); 651 skb_copy_to_linear_data(skb, data, pkt_len - 4);
551 skb->protocol = eth_type_trans(skb, dev); 652 skb->protocol = eth_type_trans(skb, ndev);
552 netif_rx(skb); 653 netif_rx(skb);
553 } 654 }
554 655
555 bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen, 656 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
556 DMA_FROM_DEVICE); 657 FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
557rx_processing_done: 658rx_processing_done:
558 /* Clear the status flags for this buffer */ 659 /* Clear the status flags for this buffer */
559 status &= ~BD_ENET_RX_STATS; 660 status &= ~BD_ENET_RX_STATS;
@@ -578,10 +679,47 @@ rx_processing_done:
578 spin_unlock(&fep->hw_lock); 679 spin_unlock(&fep->hw_lock);
579} 680}
580 681
682static irqreturn_t
683fec_enet_interrupt(int irq, void *dev_id)
684{
685 struct net_device *ndev = dev_id;
686 struct fec_enet_private *fep = netdev_priv(ndev);
687 uint int_events;
688 irqreturn_t ret = IRQ_NONE;
689
690 do {
691 int_events = readl(fep->hwp + FEC_IEVENT);
692 writel(int_events, fep->hwp + FEC_IEVENT);
693
694 if (int_events & FEC_ENET_RXF) {
695 ret = IRQ_HANDLED;
696 fec_enet_rx(ndev);
697 }
698
699 /* Transmit OK, or non-fatal error. Update the buffer
700 * descriptors. FEC handles all errors, we just discover
701 * them as part of the transmit process.
702 */
703 if (int_events & FEC_ENET_TXF) {
704 ret = IRQ_HANDLED;
705 fec_enet_tx(ndev);
706 }
707
708 if (int_events & FEC_ENET_MII) {
709 ret = IRQ_HANDLED;
710 complete(&fep->mdio_done);
711 }
712 } while (int_events);
713
714 return ret;
715}
716
717
718
581/* ------------------------------------------------------------------------- */ 719/* ------------------------------------------------------------------------- */
582static void __inline__ fec_get_mac(struct net_device *dev) 720static void __inline__ fec_get_mac(struct net_device *ndev)
583{ 721{
584 struct fec_enet_private *fep = netdev_priv(dev); 722 struct fec_enet_private *fep = netdev_priv(ndev);
585 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 723 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
586 unsigned char *iap, tmpaddr[ETH_ALEN]; 724 unsigned char *iap, tmpaddr[ETH_ALEN];
587 725
@@ -617,11 +755,11 @@ static void __inline__ fec_get_mac(struct net_device *dev)
617 iap = &tmpaddr[0]; 755 iap = &tmpaddr[0];
618 } 756 }
619 757
620 memcpy(dev->dev_addr, iap, ETH_ALEN); 758 memcpy(ndev->dev_addr, iap, ETH_ALEN);
621 759
622 /* Adjust MAC if using macaddr */ 760 /* Adjust MAC if using macaddr */
623 if (iap == macaddr) 761 if (iap == macaddr)
624 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; 762 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
625} 763}
626 764
627/* ------------------------------------------------------------------------- */ 765/* ------------------------------------------------------------------------- */
@@ -629,9 +767,9 @@ static void __inline__ fec_get_mac(struct net_device *dev)
629/* 767/*
630 * Phy section 768 * Phy section
631 */ 769 */
632static void fec_enet_adjust_link(struct net_device *dev) 770static void fec_enet_adjust_link(struct net_device *ndev)
633{ 771{
634 struct fec_enet_private *fep = netdev_priv(dev); 772 struct fec_enet_private *fep = netdev_priv(ndev);
635 struct phy_device *phy_dev = fep->phy_dev; 773 struct phy_device *phy_dev = fep->phy_dev;
636 unsigned long flags; 774 unsigned long flags;
637 775
@@ -648,7 +786,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
648 /* Duplex link change */ 786 /* Duplex link change */
649 if (phy_dev->link) { 787 if (phy_dev->link) {
650 if (fep->full_duplex != phy_dev->duplex) { 788 if (fep->full_duplex != phy_dev->duplex) {
651 fec_restart(dev, phy_dev->duplex); 789 fec_restart(ndev, phy_dev->duplex);
652 status_change = 1; 790 status_change = 1;
653 } 791 }
654 } 792 }
@@ -657,9 +795,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
657 if (phy_dev->link != fep->link) { 795 if (phy_dev->link != fep->link) {
658 fep->link = phy_dev->link; 796 fep->link = phy_dev->link;
659 if (phy_dev->link) 797 if (phy_dev->link)
660 fec_restart(dev, phy_dev->duplex); 798 fec_restart(ndev, phy_dev->duplex);
661 else 799 else
662 fec_stop(dev); 800 fec_stop(ndev);
663 status_change = 1; 801 status_change = 1;
664 } 802 }
665 803
@@ -728,9 +866,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
728 return 0; 866 return 0;
729} 867}
730 868
731static int fec_enet_mii_probe(struct net_device *dev) 869static int fec_enet_mii_probe(struct net_device *ndev)
732{ 870{
733 struct fec_enet_private *fep = netdev_priv(dev); 871 struct fec_enet_private *fep = netdev_priv(ndev);
734 struct phy_device *phy_dev = NULL; 872 struct phy_device *phy_dev = NULL;
735 char mdio_bus_id[MII_BUS_ID_SIZE]; 873 char mdio_bus_id[MII_BUS_ID_SIZE];
736 char phy_name[MII_BUS_ID_SIZE + 3]; 874 char phy_name[MII_BUS_ID_SIZE + 3];
@@ -755,16 +893,16 @@ static int fec_enet_mii_probe(struct net_device *dev)
755 893
756 if (phy_id >= PHY_MAX_ADDR) { 894 if (phy_id >= PHY_MAX_ADDR) {
757 printk(KERN_INFO "%s: no PHY, assuming direct connection " 895 printk(KERN_INFO "%s: no PHY, assuming direct connection "
758 "to switch\n", dev->name); 896 "to switch\n", ndev->name);
759 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); 897 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
760 phy_id = 0; 898 phy_id = 0;
761 } 899 }
762 900
763 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 901 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
764 phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0, 902 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
765 PHY_INTERFACE_MODE_MII); 903 PHY_INTERFACE_MODE_MII);
766 if (IS_ERR(phy_dev)) { 904 if (IS_ERR(phy_dev)) {
767 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); 905 printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
768 return PTR_ERR(phy_dev); 906 return PTR_ERR(phy_dev);
769 } 907 }
770 908
@@ -777,7 +915,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
777 fep->full_duplex = 0; 915 fep->full_duplex = 0;
778 916
779 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " 917 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
780 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, 918 "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
781 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), 919 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
782 fep->phy_dev->irq); 920 fep->phy_dev->irq);
783 921
@@ -787,8 +925,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
787static int fec_enet_mii_init(struct platform_device *pdev) 925static int fec_enet_mii_init(struct platform_device *pdev)
788{ 926{
789 static struct mii_bus *fec0_mii_bus; 927 static struct mii_bus *fec0_mii_bus;
790 struct net_device *dev = platform_get_drvdata(pdev); 928 struct net_device *ndev = platform_get_drvdata(pdev);
791 struct fec_enet_private *fep = netdev_priv(dev); 929 struct fec_enet_private *fep = netdev_priv(ndev);
792 const struct platform_device_id *id_entry = 930 const struct platform_device_id *id_entry =
793 platform_get_device_id(fep->pdev); 931 platform_get_device_id(fep->pdev);
794 int err = -ENXIO, i; 932 int err = -ENXIO, i;
@@ -846,8 +984,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
846 for (i = 0; i < PHY_MAX_ADDR; i++) 984 for (i = 0; i < PHY_MAX_ADDR; i++)
847 fep->mii_bus->irq[i] = PHY_POLL; 985 fep->mii_bus->irq[i] = PHY_POLL;
848 986
849 platform_set_drvdata(dev, fep->mii_bus);
850
851 if (mdiobus_register(fep->mii_bus)) 987 if (mdiobus_register(fep->mii_bus))
852 goto err_out_free_mdio_irq; 988 goto err_out_free_mdio_irq;
853 989
@@ -874,10 +1010,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
874 mdiobus_free(fep->mii_bus); 1010 mdiobus_free(fep->mii_bus);
875} 1011}
876 1012
877static int fec_enet_get_settings(struct net_device *dev, 1013static int fec_enet_get_settings(struct net_device *ndev,
878 struct ethtool_cmd *cmd) 1014 struct ethtool_cmd *cmd)
879{ 1015{
880 struct fec_enet_private *fep = netdev_priv(dev); 1016 struct fec_enet_private *fep = netdev_priv(ndev);
881 struct phy_device *phydev = fep->phy_dev; 1017 struct phy_device *phydev = fep->phy_dev;
882 1018
883 if (!phydev) 1019 if (!phydev)
@@ -886,10 +1022,10 @@ static int fec_enet_get_settings(struct net_device *dev,
886 return phy_ethtool_gset(phydev, cmd); 1022 return phy_ethtool_gset(phydev, cmd);
887} 1023}
888 1024
889static int fec_enet_set_settings(struct net_device *dev, 1025static int fec_enet_set_settings(struct net_device *ndev,
890 struct ethtool_cmd *cmd) 1026 struct ethtool_cmd *cmd)
891{ 1027{
892 struct fec_enet_private *fep = netdev_priv(dev); 1028 struct fec_enet_private *fep = netdev_priv(ndev);
893 struct phy_device *phydev = fep->phy_dev; 1029 struct phy_device *phydev = fep->phy_dev;
894 1030
895 if (!phydev) 1031 if (!phydev)
@@ -898,14 +1034,14 @@ static int fec_enet_set_settings(struct net_device *dev,
898 return phy_ethtool_sset(phydev, cmd); 1034 return phy_ethtool_sset(phydev, cmd);
899} 1035}
900 1036
901static void fec_enet_get_drvinfo(struct net_device *dev, 1037static void fec_enet_get_drvinfo(struct net_device *ndev,
902 struct ethtool_drvinfo *info) 1038 struct ethtool_drvinfo *info)
903{ 1039{
904 struct fec_enet_private *fep = netdev_priv(dev); 1040 struct fec_enet_private *fep = netdev_priv(ndev);
905 1041
906 strcpy(info->driver, fep->pdev->dev.driver->name); 1042 strcpy(info->driver, fep->pdev->dev.driver->name);
907 strcpy(info->version, "Revision: 1.0"); 1043 strcpy(info->version, "Revision: 1.0");
908 strcpy(info->bus_info, dev_name(&dev->dev)); 1044 strcpy(info->bus_info, dev_name(&ndev->dev));
909} 1045}
910 1046
911static struct ethtool_ops fec_enet_ethtool_ops = { 1047static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -915,12 +1051,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
915 .get_link = ethtool_op_get_link, 1051 .get_link = ethtool_op_get_link,
916}; 1052};
917 1053
918static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1054static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
919{ 1055{
920 struct fec_enet_private *fep = netdev_priv(dev); 1056 struct fec_enet_private *fep = netdev_priv(ndev);
921 struct phy_device *phydev = fep->phy_dev; 1057 struct phy_device *phydev = fep->phy_dev;
922 1058
923 if (!netif_running(dev)) 1059 if (!netif_running(ndev))
924 return -EINVAL; 1060 return -EINVAL;
925 1061
926 if (!phydev) 1062 if (!phydev)
@@ -929,9 +1065,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
929 return phy_mii_ioctl(phydev, rq, cmd); 1065 return phy_mii_ioctl(phydev, rq, cmd);
930} 1066}
931 1067
932static void fec_enet_free_buffers(struct net_device *dev) 1068static void fec_enet_free_buffers(struct net_device *ndev)
933{ 1069{
934 struct fec_enet_private *fep = netdev_priv(dev); 1070 struct fec_enet_private *fep = netdev_priv(ndev);
935 int i; 1071 int i;
936 struct sk_buff *skb; 1072 struct sk_buff *skb;
937 struct bufdesc *bdp; 1073 struct bufdesc *bdp;
@@ -941,7 +1077,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
941 skb = fep->rx_skbuff[i]; 1077 skb = fep->rx_skbuff[i];
942 1078
943 if (bdp->cbd_bufaddr) 1079 if (bdp->cbd_bufaddr)
944 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, 1080 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
945 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1081 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
946 if (skb) 1082 if (skb)
947 dev_kfree_skb(skb); 1083 dev_kfree_skb(skb);
@@ -953,9 +1089,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
953 kfree(fep->tx_bounce[i]); 1089 kfree(fep->tx_bounce[i]);
954} 1090}
955 1091
956static int fec_enet_alloc_buffers(struct net_device *dev) 1092static int fec_enet_alloc_buffers(struct net_device *ndev)
957{ 1093{
958 struct fec_enet_private *fep = netdev_priv(dev); 1094 struct fec_enet_private *fep = netdev_priv(ndev);
959 int i; 1095 int i;
960 struct sk_buff *skb; 1096 struct sk_buff *skb;
961 struct bufdesc *bdp; 1097 struct bufdesc *bdp;
@@ -964,12 +1100,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
964 for (i = 0; i < RX_RING_SIZE; i++) { 1100 for (i = 0; i < RX_RING_SIZE; i++) {
965 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); 1101 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
966 if (!skb) { 1102 if (!skb) {
967 fec_enet_free_buffers(dev); 1103 fec_enet_free_buffers(ndev);
968 return -ENOMEM; 1104 return -ENOMEM;
969 } 1105 }
970 fep->rx_skbuff[i] = skb; 1106 fep->rx_skbuff[i] = skb;
971 1107
972 bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, 1108 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
973 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1109 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
974 bdp->cbd_sc = BD_ENET_RX_EMPTY; 1110 bdp->cbd_sc = BD_ENET_RX_EMPTY;
975 bdp++; 1111 bdp++;
@@ -996,45 +1132,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
996} 1132}
997 1133
998static int 1134static int
999fec_enet_open(struct net_device *dev) 1135fec_enet_open(struct net_device *ndev)
1000{ 1136{
1001 struct fec_enet_private *fep = netdev_priv(dev); 1137 struct fec_enet_private *fep = netdev_priv(ndev);
1002 int ret; 1138 int ret;
1003 1139
1004 /* I should reset the ring buffers here, but I don't yet know 1140 /* I should reset the ring buffers here, but I don't yet know
1005 * a simple way to do that. 1141 * a simple way to do that.
1006 */ 1142 */
1007 1143
1008 ret = fec_enet_alloc_buffers(dev); 1144 ret = fec_enet_alloc_buffers(ndev);
1009 if (ret) 1145 if (ret)
1010 return ret; 1146 return ret;
1011 1147
1012 /* Probe and connect to PHY when open the interface */ 1148 /* Probe and connect to PHY when open the interface */
1013 ret = fec_enet_mii_probe(dev); 1149 ret = fec_enet_mii_probe(ndev);
1014 if (ret) { 1150 if (ret) {
1015 fec_enet_free_buffers(dev); 1151 fec_enet_free_buffers(ndev);
1016 return ret; 1152 return ret;
1017 } 1153 }
1018 phy_start(fep->phy_dev); 1154 phy_start(fep->phy_dev);
1019 netif_start_queue(dev); 1155 netif_start_queue(ndev);
1020 fep->opened = 1; 1156 fep->opened = 1;
1021 return 0; 1157 return 0;
1022} 1158}
1023 1159
1024static int 1160static int
1025fec_enet_close(struct net_device *dev) 1161fec_enet_close(struct net_device *ndev)
1026{ 1162{
1027 struct fec_enet_private *fep = netdev_priv(dev); 1163 struct fec_enet_private *fep = netdev_priv(ndev);
1028 1164
1029 /* Don't know what to do yet. */ 1165 /* Don't know what to do yet. */
1030 fep->opened = 0; 1166 fep->opened = 0;
1031 netif_stop_queue(dev); 1167 netif_stop_queue(ndev);
1032 fec_stop(dev); 1168 fec_stop(ndev);
1033 1169
1034 if (fep->phy_dev) 1170 if (fep->phy_dev) {
1171 phy_stop(fep->phy_dev);
1035 phy_disconnect(fep->phy_dev); 1172 phy_disconnect(fep->phy_dev);
1173 }
1036 1174
1037 fec_enet_free_buffers(dev); 1175 fec_enet_free_buffers(ndev);
1038 1176
1039 return 0; 1177 return 0;
1040} 1178}
@@ -1052,14 +1190,14 @@ fec_enet_close(struct net_device *dev)
1052#define HASH_BITS 6 /* #bits in hash */ 1190#define HASH_BITS 6 /* #bits in hash */
1053#define CRC32_POLY 0xEDB88320 1191#define CRC32_POLY 0xEDB88320
1054 1192
1055static void set_multicast_list(struct net_device *dev) 1193static void set_multicast_list(struct net_device *ndev)
1056{ 1194{
1057 struct fec_enet_private *fep = netdev_priv(dev); 1195 struct fec_enet_private *fep = netdev_priv(ndev);
1058 struct netdev_hw_addr *ha; 1196 struct netdev_hw_addr *ha;
1059 unsigned int i, bit, data, crc, tmp; 1197 unsigned int i, bit, data, crc, tmp;
1060 unsigned char hash; 1198 unsigned char hash;
1061 1199
1062 if (dev->flags & IFF_PROMISC) { 1200 if (ndev->flags & IFF_PROMISC) {
1063 tmp = readl(fep->hwp + FEC_R_CNTRL); 1201 tmp = readl(fep->hwp + FEC_R_CNTRL);
1064 tmp |= 0x8; 1202 tmp |= 0x8;
1065 writel(tmp, fep->hwp + FEC_R_CNTRL); 1203 writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1070,7 +1208,7 @@ static void set_multicast_list(struct net_device *dev)
1070 tmp &= ~0x8; 1208 tmp &= ~0x8;
1071 writel(tmp, fep->hwp + FEC_R_CNTRL); 1209 writel(tmp, fep->hwp + FEC_R_CNTRL);
1072 1210
1073 if (dev->flags & IFF_ALLMULTI) { 1211 if (ndev->flags & IFF_ALLMULTI) {
1074 /* Catch all multicast addresses, so set the 1212 /* Catch all multicast addresses, so set the
1075 * filter to all 1's 1213 * filter to all 1's
1076 */ 1214 */
@@ -1085,7 +1223,7 @@ static void set_multicast_list(struct net_device *dev)
1085 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 1223 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1086 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 1224 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1087 1225
1088 netdev_for_each_mc_addr(ha, dev) { 1226 netdev_for_each_mc_addr(ha, ndev) {
1089 /* Only support group multicast for now */ 1227 /* Only support group multicast for now */
1090 if (!(ha->addr[0] & 1)) 1228 if (!(ha->addr[0] & 1))
1091 continue; 1229 continue;
@@ -1093,7 +1231,7 @@ static void set_multicast_list(struct net_device *dev)
1093 /* calculate crc32 value of mac address */ 1231 /* calculate crc32 value of mac address */
1094 crc = 0xffffffff; 1232 crc = 0xffffffff;
1095 1233
1096 for (i = 0; i < dev->addr_len; i++) { 1234 for (i = 0; i < ndev->addr_len; i++) {
1097 data = ha->addr[i]; 1235 data = ha->addr[i];
1098 for (bit = 0; bit < 8; bit++, data >>= 1) { 1236 for (bit = 0; bit < 8; bit++, data >>= 1) {
1099 crc = (crc >> 1) ^ 1237 crc = (crc >> 1) ^
@@ -1120,20 +1258,20 @@ static void set_multicast_list(struct net_device *dev)
1120 1258
1121/* Set a MAC change in hardware. */ 1259/* Set a MAC change in hardware. */
1122static int 1260static int
1123fec_set_mac_address(struct net_device *dev, void *p) 1261fec_set_mac_address(struct net_device *ndev, void *p)
1124{ 1262{
1125 struct fec_enet_private *fep = netdev_priv(dev); 1263 struct fec_enet_private *fep = netdev_priv(ndev);
1126 struct sockaddr *addr = p; 1264 struct sockaddr *addr = p;
1127 1265
1128 if (!is_valid_ether_addr(addr->sa_data)) 1266 if (!is_valid_ether_addr(addr->sa_data))
1129 return -EADDRNOTAVAIL; 1267 return -EADDRNOTAVAIL;
1130 1268
1131 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1269 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1132 1270
1133 writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) | 1271 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
1134 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), 1272 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
1135 fep->hwp + FEC_ADDR_LOW); 1273 fep->hwp + FEC_ADDR_LOW);
1136 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), 1274 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
1137 fep->hwp + FEC_ADDR_HIGH); 1275 fep->hwp + FEC_ADDR_HIGH);
1138 return 0; 1276 return 0;
1139} 1277}
@@ -1147,16 +1285,16 @@ static const struct net_device_ops fec_netdev_ops = {
1147 .ndo_validate_addr = eth_validate_addr, 1285 .ndo_validate_addr = eth_validate_addr,
1148 .ndo_tx_timeout = fec_timeout, 1286 .ndo_tx_timeout = fec_timeout,
1149 .ndo_set_mac_address = fec_set_mac_address, 1287 .ndo_set_mac_address = fec_set_mac_address,
1150 .ndo_do_ioctl = fec_enet_ioctl, 1288 .ndo_do_ioctl = fec_enet_ioctl,
1151}; 1289};
1152 1290
1153 /* 1291 /*
1154 * XXX: We need to clean up on failure exits here. 1292 * XXX: We need to clean up on failure exits here.
1155 * 1293 *
1156 */ 1294 */
1157static int fec_enet_init(struct net_device *dev) 1295static int fec_enet_init(struct net_device *ndev)
1158{ 1296{
1159 struct fec_enet_private *fep = netdev_priv(dev); 1297 struct fec_enet_private *fep = netdev_priv(ndev);
1160 struct bufdesc *cbd_base; 1298 struct bufdesc *cbd_base;
1161 struct bufdesc *bdp; 1299 struct bufdesc *bdp;
1162 int i; 1300 int i;
@@ -1171,20 +1309,19 @@ static int fec_enet_init(struct net_device *dev)
1171 1309
1172 spin_lock_init(&fep->hw_lock); 1310 spin_lock_init(&fep->hw_lock);
1173 1311
1174 fep->hwp = (void __iomem *)dev->base_addr; 1312 fep->netdev = ndev;
1175 fep->netdev = dev;
1176 1313
1177 /* Get the Ethernet address */ 1314 /* Get the Ethernet address */
1178 fec_get_mac(dev); 1315 fec_get_mac(ndev);
1179 1316
1180 /* Set receive and transmit descriptor base. */ 1317 /* Set receive and transmit descriptor base. */
1181 fep->rx_bd_base = cbd_base; 1318 fep->rx_bd_base = cbd_base;
1182 fep->tx_bd_base = cbd_base + RX_RING_SIZE; 1319 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1183 1320
1184 /* The FEC Ethernet specific entries in the device structure */ 1321 /* The FEC Ethernet specific entries in the device structure */
1185 dev->watchdog_timeo = TX_TIMEOUT; 1322 ndev->watchdog_timeo = TX_TIMEOUT;
1186 dev->netdev_ops = &fec_netdev_ops; 1323 ndev->netdev_ops = &fec_netdev_ops;
1187 dev->ethtool_ops = &fec_enet_ethtool_ops; 1324 ndev->ethtool_ops = &fec_enet_ethtool_ops;
1188 1325
1189 /* Initialize the receive buffer descriptors. */ 1326 /* Initialize the receive buffer descriptors. */
1190 bdp = fep->rx_bd_base; 1327 bdp = fep->rx_bd_base;
@@ -1213,152 +1350,11 @@ static int fec_enet_init(struct net_device *dev)
1213 bdp--; 1350 bdp--;
1214 bdp->cbd_sc |= BD_SC_WRAP; 1351 bdp->cbd_sc |= BD_SC_WRAP;
1215 1352
1216 fec_restart(dev, 0); 1353 fec_restart(ndev, 0);
1217 1354
1218 return 0; 1355 return 0;
1219} 1356}
1220 1357
1221/* This function is called to start or restart the FEC during a link
1222 * change. This only happens when switching between half and full
1223 * duplex.
1224 */
1225static void
1226fec_restart(struct net_device *dev, int duplex)
1227{
1228 struct fec_enet_private *fep = netdev_priv(dev);
1229 const struct platform_device_id *id_entry =
1230 platform_get_device_id(fep->pdev);
1231 int i;
1232 u32 val, temp_mac[2];
1233
1234 /* Whack a reset. We should wait for this. */
1235 writel(1, fep->hwp + FEC_ECNTRL);
1236 udelay(10);
1237
1238 /*
1239 * enet-mac reset will reset mac address registers too,
1240 * so need to reconfigure it.
1241 */
1242 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1243 memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
1244 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
1245 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
1246 }
1247
1248 /* Clear any outstanding interrupt. */
1249 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1250
1251 /* Reset all multicast. */
1252 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1253 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1254#ifndef CONFIG_M5272
1255 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1256 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1257#endif
1258
1259 /* Set maximum receive buffer size. */
1260 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
1261
1262 /* Set receive and transmit descriptor base. */
1263 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1264 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
1265 fep->hwp + FEC_X_DES_START);
1266
1267 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1268 fep->cur_rx = fep->rx_bd_base;
1269
1270 /* Reset SKB transmit buffers. */
1271 fep->skb_cur = fep->skb_dirty = 0;
1272 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
1273 if (fep->tx_skbuff[i]) {
1274 dev_kfree_skb_any(fep->tx_skbuff[i]);
1275 fep->tx_skbuff[i] = NULL;
1276 }
1277 }
1278
1279 /* Enable MII mode */
1280 if (duplex) {
1281 /* MII enable / FD enable */
1282 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1283 writel(0x04, fep->hwp + FEC_X_CNTRL);
1284 } else {
1285 /* MII enable / No Rcv on Xmit */
1286 writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
1287 writel(0x0, fep->hwp + FEC_X_CNTRL);
1288 }
1289 fep->full_duplex = duplex;
1290
1291 /* Set MII speed */
1292 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1293
1294 /*
1295 * The phy interface and speed need to get configured
1296 * differently on enet-mac.
1297 */
1298 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1299 val = readl(fep->hwp + FEC_R_CNTRL);
1300
1301 /* MII or RMII */
1302 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1303 val |= (1 << 8);
1304 else
1305 val &= ~(1 << 8);
1306
1307 /* 10M or 100M */
1308 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
1309 val &= ~(1 << 9);
1310 else
1311 val |= (1 << 9);
1312
1313 writel(val, fep->hwp + FEC_R_CNTRL);
1314 } else {
1315#ifdef FEC_MIIGSK_ENR
1316 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
1317 /* disable the gasket and wait */
1318 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1319 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1320 udelay(1);
1321
1322 /*
1323 * configure the gasket:
1324 * RMII, 50 MHz, no loopback, no echo
1325 */
1326 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
1327
1328 /* re-enable the gasket */
1329 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1330 }
1331#endif
1332 }
1333
1334 /* And last, enable the transmit and receive processing */
1335 writel(2, fep->hwp + FEC_ECNTRL);
1336 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1337
1338 /* Enable interrupts we wish to service */
1339 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1340}
1341
1342static void
1343fec_stop(struct net_device *dev)
1344{
1345 struct fec_enet_private *fep = netdev_priv(dev);
1346
1347 /* We cannot expect a graceful transmit stop without link !!! */
1348 if (fep->link) {
1349 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1350 udelay(10);
1351 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1352 printk("fec_stop : Graceful transmit stop did not complete !\n");
1353 }
1354
1355 /* Whack a reset. We should wait for this. */
1356 writel(1, fep->hwp + FEC_ECNTRL);
1357 udelay(10);
1358 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1359 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1360}
1361
1362static int __devinit 1358static int __devinit
1363fec_probe(struct platform_device *pdev) 1359fec_probe(struct platform_device *pdev)
1364{ 1360{
@@ -1378,19 +1374,20 @@ fec_probe(struct platform_device *pdev)
1378 1374
1379 /* Init network device */ 1375 /* Init network device */
1380 ndev = alloc_etherdev(sizeof(struct fec_enet_private)); 1376 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
1381 if (!ndev) 1377 if (!ndev) {
1382 return -ENOMEM; 1378 ret = -ENOMEM;
1379 goto failed_alloc_etherdev;
1380 }
1383 1381
1384 SET_NETDEV_DEV(ndev, &pdev->dev); 1382 SET_NETDEV_DEV(ndev, &pdev->dev);
1385 1383
1386 /* setup board info structure */ 1384 /* setup board info structure */
1387 fep = netdev_priv(ndev); 1385 fep = netdev_priv(ndev);
1388 memset(fep, 0, sizeof(*fep));
1389 1386
1390 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); 1387 fep->hwp = ioremap(r->start, resource_size(r));
1391 fep->pdev = pdev; 1388 fep->pdev = pdev;
1392 1389
1393 if (!ndev->base_addr) { 1390 if (!fep->hwp) {
1394 ret = -ENOMEM; 1391 ret = -ENOMEM;
1395 goto failed_ioremap; 1392 goto failed_ioremap;
1396 } 1393 }
@@ -1408,10 +1405,9 @@ fec_probe(struct platform_device *pdev)
1408 break; 1405 break;
1409 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); 1406 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
1410 if (ret) { 1407 if (ret) {
1411 while (i >= 0) { 1408 while (--i >= 0) {
1412 irq = platform_get_irq(pdev, i); 1409 irq = platform_get_irq(pdev, i);
1413 free_irq(irq, ndev); 1410 free_irq(irq, ndev);
1414 i--;
1415 } 1411 }
1416 goto failed_irq; 1412 goto failed_irq;
1417 } 1413 }
@@ -1454,9 +1450,11 @@ failed_clk:
1454 free_irq(irq, ndev); 1450 free_irq(irq, ndev);
1455 } 1451 }
1456failed_irq: 1452failed_irq:
1457 iounmap((void __iomem *)ndev->base_addr); 1453 iounmap(fep->hwp);
1458failed_ioremap: 1454failed_ioremap:
1459 free_netdev(ndev); 1455 free_netdev(ndev);
1456failed_alloc_etherdev:
1457 release_mem_region(r->start, resource_size(r));
1460 1458
1461 return ret; 1459 return ret;
1462} 1460}
@@ -1466,16 +1464,22 @@ fec_drv_remove(struct platform_device *pdev)
1466{ 1464{
1467 struct net_device *ndev = platform_get_drvdata(pdev); 1465 struct net_device *ndev = platform_get_drvdata(pdev);
1468 struct fec_enet_private *fep = netdev_priv(ndev); 1466 struct fec_enet_private *fep = netdev_priv(ndev);
1469 1467 struct resource *r;
1470 platform_set_drvdata(pdev, NULL);
1471 1468
1472 fec_stop(ndev); 1469 fec_stop(ndev);
1473 fec_enet_mii_remove(fep); 1470 fec_enet_mii_remove(fep);
1474 clk_disable(fep->clk); 1471 clk_disable(fep->clk);
1475 clk_put(fep->clk); 1472 clk_put(fep->clk);
1476 iounmap((void __iomem *)ndev->base_addr); 1473 iounmap(fep->hwp);
1477 unregister_netdev(ndev); 1474 unregister_netdev(ndev);
1478 free_netdev(ndev); 1475 free_netdev(ndev);
1476
1477 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1478 BUG_ON(!r);
1479 release_mem_region(r->start, resource_size(r));
1480
1481 platform_set_drvdata(pdev, NULL);
1482
1479 return 0; 1483 return 0;
1480} 1484}
1481 1485
@@ -1484,16 +1488,14 @@ static int
1484fec_suspend(struct device *dev) 1488fec_suspend(struct device *dev)
1485{ 1489{
1486 struct net_device *ndev = dev_get_drvdata(dev); 1490 struct net_device *ndev = dev_get_drvdata(dev);
1487 struct fec_enet_private *fep; 1491 struct fec_enet_private *fep = netdev_priv(ndev);
1488 1492
1489 if (ndev) { 1493 if (netif_running(ndev)) {
1490 fep = netdev_priv(ndev); 1494 fec_stop(ndev);
1491 if (netif_running(ndev)) { 1495 netif_device_detach(ndev);
1492 fec_stop(ndev);
1493 netif_device_detach(ndev);
1494 }
1495 clk_disable(fep->clk);
1496 } 1496 }
1497 clk_disable(fep->clk);
1498
1497 return 0; 1499 return 0;
1498} 1500}
1499 1501
@@ -1501,16 +1503,14 @@ static int
1501fec_resume(struct device *dev) 1503fec_resume(struct device *dev)
1502{ 1504{
1503 struct net_device *ndev = dev_get_drvdata(dev); 1505 struct net_device *ndev = dev_get_drvdata(dev);
1504 struct fec_enet_private *fep; 1506 struct fec_enet_private *fep = netdev_priv(ndev);
1505 1507
1506 if (ndev) { 1508 clk_enable(fep->clk);
1507 fep = netdev_priv(ndev); 1509 if (netif_running(ndev)) {
1508 clk_enable(fep->clk); 1510 fec_restart(ndev, fep->full_duplex);
1509 if (netif_running(ndev)) { 1511 netif_device_attach(ndev);
1510 fec_restart(ndev, fep->full_duplex);
1511 netif_device_attach(ndev);
1512 }
1513 } 1512 }
1513
1514 return 0; 1514 return 0;
1515} 1515}
1516 1516
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 9c0b1bac6af6..7b92897ca66b 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5744,7 +5744,7 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
5744 pci_set_drvdata(pci_dev, NULL); 5744 pci_set_drvdata(pci_dev, NULL);
5745} 5745}
5746 5746
5747#ifdef CONFIG_PM 5747#ifdef CONFIG_PM_SLEEP
5748static int nv_suspend(struct device *device) 5748static int nv_suspend(struct device *device)
5749{ 5749{
5750 struct pci_dev *pdev = to_pci_dev(device); 5750 struct pci_dev *pdev = to_pci_dev(device);
@@ -5795,6 +5795,11 @@ static int nv_resume(struct device *device)
5795static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); 5795static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
5796#define NV_PM_OPS (&nv_pm_ops) 5796#define NV_PM_OPS (&nv_pm_ops)
5797 5797
5798#else
5799#define NV_PM_OPS NULL
5800#endif /* CONFIG_PM_SLEEP */
5801
5802#ifdef CONFIG_PM
5798static void nv_shutdown(struct pci_dev *pdev) 5803static void nv_shutdown(struct pci_dev *pdev)
5799{ 5804{
5800 struct net_device *dev = pci_get_drvdata(pdev); 5805 struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,7 +5827,6 @@ static void nv_shutdown(struct pci_dev *pdev)
5822 } 5827 }
5823} 5828}
5824#else 5829#else
5825#define NV_PM_OPS NULL
5826#define nv_shutdown NULL 5830#define nv_shutdown NULL
5827#endif /* CONFIG_PM */ 5831#endif /* CONFIG_PM */
5828 5832
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
new file mode 100644
index 000000000000..1d6f4b8d393a
--- /dev/null
+++ b/drivers/net/ftmac100.c
@@ -0,0 +1,1198 @@
1/*
2 * Faraday FTMAC100 10/100 Ethernet
3 *
4 * (C) Copyright 2009-2011 Faraday Technology
5 * Po-Yu Chuang <ratbert@faraday-tech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/dma-mapping.h>
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/init.h>
28#include <linux/io.h>
29#include <linux/mii.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include <linux/platform_device.h>
33
34#include "ftmac100.h"
35
36#define DRV_NAME "ftmac100"
37#define DRV_VERSION "0.2"
38
39#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
40#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
41
42#define MAX_PKT_SIZE 1518
43#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */
44
45#if MAX_PKT_SIZE > 0x7ff
46#error invalid MAX_PKT_SIZE
47#endif
48
49#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
50#error invalid RX_BUF_SIZE
51#endif
52
53/******************************************************************************
54 * private data
55 *****************************************************************************/
/*
 * RX and TX descriptor rings, kept together in one DMA-coherent
 * allocation (see ftmac100_alloc_buffers()).  Both ring sizes are
 * powers of two so the ring pointers can wrap with a mask.
 */
struct ftmac100_descs {
	struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
	struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES];
};
60
/* Per-device driver state; lives in netdev_priv() of the net_device. */
struct ftmac100 {
	struct resource *res;		/* presumably the MMIO register resource - not used in this chunk */
	void __iomem *base;		/* mapped MAC register base */
	int irq;

	struct ftmac100_descs *descs;	/* RX/TX rings (DMA-coherent block) */
	dma_addr_t descs_dma_addr;	/* bus address of @descs */

	unsigned int rx_pointer;	/* next RX descriptor to examine */
	unsigned int tx_clean_pointer;	/* oldest not-yet-reclaimed TX descriptor */
	unsigned int tx_pointer;	/* next free TX descriptor */
	unsigned int tx_pending;	/* number of TX descriptors in flight */

	spinlock_t tx_lock;		/* protects tx_pending (xmit vs. completion) */

	struct net_device *netdev;
	struct device *dev;
	struct napi_struct napi;

	struct mii_if_info mii;		/* generic MII state (mdio read/write hooks) */
};
82
83static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
84 struct ftmac100_rxdes *rxdes, gfp_t gfp);
85
86/******************************************************************************
87 * internal functions (hardware register access)
88 *****************************************************************************/
89#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \
90 FTMAC100_INT_NORXBUF | \
91 FTMAC100_INT_XPKT_OK | \
92 FTMAC100_INT_XPKT_LOST | \
93 FTMAC100_INT_RPKT_LOST | \
94 FTMAC100_INT_AHB_ERR | \
95 FTMAC100_INT_PHYSTS_CHG)
96
97#define INT_MASK_ALL_DISABLED 0
98
/* Unmask every interrupt source the driver handles (INT_MASK_ALL_ENABLED). */
static void ftmac100_enable_all_int(struct ftmac100 *priv)
{
	iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
}

/* Mask all interrupt sources; used while NAPI polling is in progress. */
static void ftmac100_disable_all_int(struct ftmac100 *priv)
{
	iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR);
}
108
/* Program the RX descriptor ring base (bus) address. */
static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
{
	iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR);
}

/* Program the TX descriptor ring base (bus) address. */
static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
{
	iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
}

/* Kick the TX DMA engine so it starts fetching descriptors. */
static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
{
	iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
}
123
/*
 * Perform a software reset of the MAC.
 *
 * NOTE: the reset clears all registers, so the caller must reprogram
 * everything afterwards (ftmac100_start_hw() does).
 *
 * Returns 0 on success, -EIO if the reset bit never clears.
 */
static int ftmac100_reset(struct ftmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	int i;

	/* NOTE: reset clears all registers */
	iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR);

	for (i = 0; i < 5; i++) {
		unsigned int maccr;

		maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
		if (!(maccr & FTMAC100_MACCR_SW_RST)) {
			/*
			 * A cleared FTMAC100_MACCR_SW_RST bit does not mean
			 * the hardware reset has actually completed; give
			 * the chip some additional settling time.
			 */
			usleep_range(500, 1000);
			return 0;
		}

		usleep_range(1000, 10000);
	}

	netdev_err(netdev, "software reset failed\n");
	return -EIO;
}
152
153static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
154{
155 unsigned int maddr = mac[0] << 8 | mac[1];
156 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
157
158 iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR);
159 iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
160}
161
162#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
163 FTMAC100_MACCR_RCV_EN | \
164 FTMAC100_MACCR_XDMA_EN | \
165 FTMAC100_MACCR_RDMA_EN | \
166 FTMAC100_MACCR_CRC_APD | \
167 FTMAC100_MACCR_FULLDUP | \
168 FTMAC100_MACCR_RX_RUNT | \
169 FTMAC100_MACCR_RX_BROADPKT)
170
/*
 * Reset the MAC and bring the datapath up: program the ring base
 * addresses, the automatic RX polling counter, the station address,
 * and finally enable RX/TX and their DMA engines via MACCR.
 *
 * Returns 0 on success, -EIO if the software reset failed.
 */
static int ftmac100_start_hw(struct ftmac100 *priv)
{
	struct net_device *netdev = priv->netdev;

	if (ftmac100_reset(priv))
		return -EIO;

	/* setup ring buffer base registers */
	ftmac100_set_rx_ring_base(priv,
				  priv->descs_dma_addr +
				  offsetof(struct ftmac100_descs, rxdes));
	ftmac100_set_tx_ring_base(priv,
				  priv->descs_dma_addr +
				  offsetof(struct ftmac100_descs, txdes));

	/* automatic RX descriptor polling (RXPOLL_CNT = 1) */
	iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC);

	ftmac100_set_mac(priv, netdev->dev_addr);

	/* enable receive/transmit paths and their DMA engines */
	iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
	return 0;
}
193
/* Stop the MAC entirely by clearing every MACCR control bit. */
static void ftmac100_stop_hw(struct ftmac100 *priv)
{
	iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
}
198
199/******************************************************************************
200 * internal functions (receive descriptor)
201 *****************************************************************************/
/* Descriptor holds the first segment of a frame. */
static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS);
}

/* Descriptor holds the last segment of a frame. */
static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS);
}

/* Descriptor is still owned by the RX DMA engine (no frame in it yet). */
static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
}

/* Hand the descriptor back to the RX DMA engine. */
static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes)
{
	/* clear status bits */
	rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
}

/* Generic RX error flag. */
static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR);
}

/* Frame failed the CRC check. */
static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
}

/* Frame was longer than the allowed maximum. */
static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
}

/* Frame was shorter than the allowed minimum (runt). */
static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
}

/* Frame did not end on a byte boundary (odd nibble). */
static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB);
}

/* Received frame length in bytes, as reported by the hardware. */
static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes)
{
	return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL;
}

/* Frame was addressed to a multicast address. */
static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes)
{
	return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST);
}

/* Set the receive buffer size, preserving the end-of-ring bit. */
static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes,
					   unsigned int size)
{
	rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR);
	rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size));
}

/* Mark this descriptor as the last one in the ring (wrap point). */
static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes)
{
	rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR);
}

/* Store the DMA (bus) address of the receive buffer. */
static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes,
					dma_addr_t addr)
{
	rxdes->rxdes2 = cpu_to_le32(addr);
}

/* Read back the DMA (bus) address of the receive buffer. */
static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes)
{
	return le32_to_cpu(rxdes->rxdes2);
}

/*
 * rxdes3 is not used by hardware. We use it to keep track of page.
 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
 */
static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
{
	rxdes->rxdes3 = (unsigned int)page;
}

/* Retrieve the page previously stashed by ftmac100_rxdes_set_page(). */
static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
{
	return (struct page *)rxdes->rxdes3;
}
294
295/******************************************************************************
296 * internal functions (receive)
297 *****************************************************************************/
/* Next RX ring index; RX_QUEUE_ENTRIES is a power of 2, so wrap with a mask. */
static int ftmac100_next_rx_pointer(int pointer)
{
	return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
}

/* Advance to the next RX descriptor to be examined. */
static void ftmac100_rx_pointer_advance(struct ftmac100 *priv)
{
	priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer);
}

/* RX descriptor currently addressed by the RX pointer. */
static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv)
{
	return &priv->descs->rxdes[priv->rx_pointer];
}
312
/*
 * Walk the RX ring from the current pointer to the next CPU-owned
 * descriptor that marks the first segment of a frame.  CPU-owned
 * descriptors skipped on the way are returned to the DMA engine.
 * Returns NULL when no complete frame start is currently available.
 */
static struct ftmac100_rxdes *
ftmac100_rx_locate_first_segment(struct ftmac100 *priv)
{
	struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);

	while (!ftmac100_rxdes_owned_by_dma(rxdes)) {
		if (ftmac100_rxdes_first_segment(rxdes))
			return rxdes;

		ftmac100_rxdes_set_dma_own(rxdes);
		ftmac100_rx_pointer_advance(priv);
		rxdes = ftmac100_current_rxdes(priv);
	}

	return NULL;
}
329
/*
 * Inspect a first-segment RX descriptor for error conditions, bump the
 * matching netdev error counters (rate-limited log per condition), and
 * report whether the frame must be dropped.
 */
static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
				     struct ftmac100_rxdes *rxdes)
{
	struct net_device *netdev = priv->netdev;
	bool error = false;

	if (unlikely(ftmac100_rxdes_rx_error(rxdes))) {
		if (net_ratelimit())
			netdev_info(netdev, "rx err\n");

		netdev->stats.rx_errors++;
		error = true;
	}

	if (unlikely(ftmac100_rxdes_crc_error(rxdes))) {
		if (net_ratelimit())
			netdev_info(netdev, "rx crc err\n");

		netdev->stats.rx_crc_errors++;
		error = true;
	}

	/* the three length conditions are mutually exclusive - count once */
	if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
		if (net_ratelimit())
			netdev_info(netdev, "rx frame too long\n");

		netdev->stats.rx_length_errors++;
		error = true;
	} else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
		if (net_ratelimit())
			netdev_info(netdev, "rx runt\n");

		netdev->stats.rx_length_errors++;
		error = true;
	} else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) {
		if (net_ratelimit())
			netdev_info(netdev, "rx odd nibble\n");

		netdev->stats.rx_length_errors++;
		error = true;
	}

	return error;
}
374
/*
 * Discard the frame starting at the current RX descriptor: hand every
 * descriptor back to the DMA engine up to and including the last
 * segment (or until we run into a DMA-owned descriptor).
 */
static void ftmac100_rx_drop_packet(struct ftmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
	bool done = false;

	if (net_ratelimit())
		netdev_dbg(netdev, "drop packet %p\n", rxdes);

	do {
		if (ftmac100_rxdes_last_segment(rxdes))
			done = true;

		ftmac100_rxdes_set_dma_own(rxdes);
		ftmac100_rx_pointer_advance(priv);
		rxdes = ftmac100_current_rxdes(priv);
	} while (!done && !ftmac100_rxdes_owned_by_dma(rxdes));

	netdev->stats.rx_dropped++;
}
395
396static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
397{
398 struct net_device *netdev = priv->netdev;
399 struct ftmac100_rxdes *rxdes;
400 struct sk_buff *skb;
401 struct page *page;
402 dma_addr_t map;
403 int length;
404
405 rxdes = ftmac100_rx_locate_first_segment(priv);
406 if (!rxdes)
407 return false;
408
409 if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
410 ftmac100_rx_drop_packet(priv);
411 return true;
412 }
413
414 /*
415 * It is impossible to get multi-segment packets
416 * because we always provide big enough receive buffers.
417 */
418 if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
419 BUG();
420
421 /* start processing */
422 skb = netdev_alloc_skb_ip_align(netdev, 128);
423 if (unlikely(!skb)) {
424 if (net_ratelimit())
425 netdev_err(netdev, "rx skb alloc failed\n");
426
427 ftmac100_rx_drop_packet(priv);
428 return true;
429 }
430
431 if (unlikely(ftmac100_rxdes_multicast(rxdes)))
432 netdev->stats.multicast++;
433
434 map = ftmac100_rxdes_get_dma_addr(rxdes);
435 dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
436
437 length = ftmac100_rxdes_frame_length(rxdes);
438 page = ftmac100_rxdes_get_page(rxdes);
439 skb_fill_page_desc(skb, 0, page, 0, length);
440 skb->len += length;
441 skb->data_len += length;
442 skb->truesize += length;
443 __pskb_pull_tail(skb, min(length, 64));
444
445 ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
446
447 ftmac100_rx_pointer_advance(priv);
448
449 skb->protocol = eth_type_trans(skb, netdev);
450
451 netdev->stats.rx_packets++;
452 netdev->stats.rx_bytes += skb->len;
453
454 /* push packet to protocol stack */
455 netif_receive_skb(skb);
456
457 (*processed)++;
458 return true;
459}
460
461/******************************************************************************
462 * internal functions (transmit descriptor)
463 *****************************************************************************/
/* Reset a TX descriptor after completion, keeping only the ring-wrap bit. */
static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes)
{
	/* clear all except end of ring bit */
	txdes->txdes0 = 0;
	txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
	txdes->txdes2 = 0;
	txdes->txdes3 = 0;
}

/* Descriptor is still owned by the TX DMA engine (not yet transmitted). */
static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes)
{
	return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
}

/* Hand the descriptor to the TX DMA engine for transmission. */
static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes)
{
	/*
	 * Make sure dma own bit will not be set before any other
	 * descriptor fields.
	 */
	wmb();
	txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
}

/* Packet was lost due to excessive collisions. */
static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes)
{
	return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL);
}

/* Packet was lost due to a late collision. */
static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes)
{
	return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL);
}

/* Mark this descriptor as the last one in the ring (wrap point). */
static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes)
{
	txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
}

/* Descriptor holds the first segment of the frame. */
static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes)
{
	txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS);
}

/* Descriptor holds the last segment of the frame. */
static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes)
{
	txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS);
}

/* Request an interrupt when this descriptor has been transmitted. */
static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes)
{
	txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC);
}

/* Set the transmit buffer length in bytes. */
static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes,
					   unsigned int len)
{
	txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len));
}

/* Store the DMA (bus) address of the transmit buffer. */
static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes,
					dma_addr_t addr)
{
	txdes->txdes2 = cpu_to_le32(addr);
}

/* Read back the DMA (bus) address of the transmit buffer. */
static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes)
{
	return le32_to_cpu(txdes->txdes2);
}

/*
 * txdes3 is not used by hardware. We use it to keep track of socket buffer.
 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
 */
static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
{
	txdes->txdes3 = (unsigned int)skb;
}

/* Retrieve the skb previously stashed by ftmac100_txdes_set_skb(). */
static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes)
{
	return (struct sk_buff *)txdes->txdes3;
}
548
549/******************************************************************************
550 * internal functions (transmit)
551 *****************************************************************************/
/* Next TX ring index; TX_QUEUE_ENTRIES is a power of 2, so wrap with a mask. */
static int ftmac100_next_tx_pointer(int pointer)
{
	return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
}

/* Advance the producer pointer (next descriptor to queue on). */
static void ftmac100_tx_pointer_advance(struct ftmac100 *priv)
{
	priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer);
}

/* Advance the consumer pointer (next descriptor to reclaim). */
static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv)
{
	priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
}

/* Descriptor currently addressed by the producer pointer. */
static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
{
	return &priv->descs->txdes[priv->tx_pointer];
}

/* Descriptor currently addressed by the consumer pointer. */
static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
{
	return &priv->descs->txdes[priv->tx_clean_pointer];
}
576
/*
 * Reclaim one completed TX descriptor: update stats, unmap and free its
 * skb, reset the descriptor, and advance the clean pointer.
 * Returns false when there is nothing (more) to reclaim.
 */
static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	struct ftmac100_txdes *txdes;
	struct sk_buff *skb;
	dma_addr_t map;

	if (priv->tx_pending == 0)
		return false;

	txdes = ftmac100_current_clean_txdes(priv);

	/* still owned by the DMA engine - not transmitted yet */
	if (ftmac100_txdes_owned_by_dma(txdes))
		return false;

	skb = ftmac100_txdes_get_skb(txdes);
	map = ftmac100_txdes_get_dma_addr(txdes);

	if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
		     ftmac100_txdes_late_collision(txdes))) {
		/*
		 * packet transmitted to ethernet lost due to late collision
		 * or excessive collision
		 */
		netdev->stats.tx_aborted_errors++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	ftmac100_txdes_reset(txdes);

	ftmac100_tx_clean_pointer_advance(priv);

	/* tx_lock serializes tx_pending against the xmit path */
	spin_lock(&priv->tx_lock);
	priv->tx_pending--;
	spin_unlock(&priv->tx_lock);
	netif_wake_queue(netdev);

	return true;
}
621
622static void ftmac100_tx_complete(struct ftmac100 *priv)
623{
624 while (ftmac100_tx_complete_packet(priv))
625 ;
626}
627
/*
 * Queue one already-DMA-mapped skb on the TX ring and kick the TX DMA
 * engine.  @map must be the DMA address of skb->data.  Always returns
 * NETDEV_TX_OK; stops the netif queue when the ring becomes full.
 */
static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
			 dma_addr_t map)
{
	struct net_device *netdev = priv->netdev;
	struct ftmac100_txdes *txdes;
	/*
	 * NOTE(review): short frames are "padded" to ETH_ZLEN only by
	 * enlarging the descriptor buffer size, so the MAC reads past the
	 * skb data - confirm the mapping always covers ETH_ZLEN bytes.
	 */
	unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;

	txdes = ftmac100_current_txdes(priv);
	ftmac100_tx_pointer_advance(priv);

	/* setup TX descriptor */
	ftmac100_txdes_set_skb(txdes, skb);
	ftmac100_txdes_set_dma_addr(txdes, map);

	ftmac100_txdes_set_first_segment(txdes);
	ftmac100_txdes_set_last_segment(txdes);
	ftmac100_txdes_set_txint(txdes);
	ftmac100_txdes_set_buffer_size(txdes, len);

	spin_lock(&priv->tx_lock);
	priv->tx_pending++;
	if (priv->tx_pending == TX_QUEUE_ENTRIES)
		netif_stop_queue(netdev);

	/* start transmit */
	ftmac100_txdes_set_dma_own(txdes);
	spin_unlock(&priv->tx_lock);

	ftmac100_txdma_start_polling(priv);
	return NETDEV_TX_OK;
}
659
660/******************************************************************************
661 * internal functions (buffer)
662 *****************************************************************************/
/*
 * Allocate a fresh page for an RX descriptor, DMA-map it, record it in
 * the descriptor, and hand the descriptor to the DMA engine.
 * Returns 0 on success or -ENOMEM (descriptor left untouched on failure).
 */
static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
				  struct ftmac100_rxdes *rxdes, gfp_t gfp)
{
	struct net_device *netdev = priv->netdev;
	struct page *page;
	dma_addr_t map;

	page = alloc_page(gfp);
	if (!page) {
		if (net_ratelimit())
			netdev_err(netdev, "failed to allocate rx page\n");
		return -ENOMEM;
	}

	map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, map))) {
		if (net_ratelimit())
			netdev_err(netdev, "failed to map rx page\n");
		__free_page(page);
		return -ENOMEM;
	}

	ftmac100_rxdes_set_page(rxdes, page);
	ftmac100_rxdes_set_dma_addr(rxdes, map);
	ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
	/* must be last: gives the descriptor to the hardware */
	ftmac100_rxdes_set_dma_own(rxdes);
	return 0;
}
691
/*
 * Release every RX page and every queued TX skb (with their DMA
 * mappings), then free the descriptor block itself.
 */
static void ftmac100_free_buffers(struct ftmac100 *priv)
{
	int i;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
		struct page *page = ftmac100_rxdes_get_page(rxdes);
		dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);

		/* descriptors without a page were never populated */
		if (!page)
			continue;

		dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
		__free_page(page);
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
		struct ftmac100_txdes *txdes = &priv->descs->txdes[i];
		struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
		dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);

		if (!skb)
			continue;

		dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
		dev_kfree_skb(skb);
	}

	dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs),
			  priv->descs, priv->descs_dma_addr);
}
723
/*
 * Allocate the DMA-coherent descriptor block and populate the RX ring
 * with mapped pages; TX descriptors start out empty.
 * Returns 0 on success or -ENOMEM (partial allocations are released).
 */
static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
	int i;

	priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
					 &priv->descs_dma_addr, GFP_KERNEL);
	if (!priv->descs)
		return -ENOMEM;

	memset(priv->descs, 0, sizeof(struct ftmac100_descs));

	/* initialize RX ring */
	ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];

		if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
			goto err;
	}

	/* initialize TX ring */
	ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
	return 0;

err:
	ftmac100_free_buffers(priv);
	return -ENOMEM;
}
753
754/******************************************************************************
755 * struct mii_if_info functions
756 *****************************************************************************/
/*
 * struct mii_if_info read hook: issue a PHY register read through PHYCR
 * and poll (up to 10 times) for completion.
 * Returns the 16-bit register value, or 0 on timeout.
 */
static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int i;

	phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
		FTMAC100_PHYCR_REGAD(reg) |
		FTMAC100_PHYCR_MIIRD;

	iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);

		/* hardware clears MIIRD when the read has finished */
		if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
			return phycr & FTMAC100_PHYCR_MIIRDATA;

		usleep_range(100, 1000);
	}

	netdev_err(netdev, "mdio read timed out\n");
	return 0;
}
781
/*
 * struct mii_if_info write hook: write a PHY register through
 * PHYWDATA/PHYCR and poll (up to 10 times) for completion.
 * Times out silently apart from the error message.
 */
static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
				int data)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int i;

	phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
		FTMAC100_PHYCR_REGAD(reg) |
		FTMAC100_PHYCR_MIIWR;

	data = FTMAC100_PHYWDATA_MIIWDATA(data);

	/* data must be loaded before the command register is written */
	iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA);
	iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);

		/* hardware clears MIIWR when the write has finished */
		if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
			return;

		usleep_range(100, 1000);
	}

	netdev_err(netdev, "mdio write timed out\n");
}
809
810/******************************************************************************
811 * struct ethtool_ops functions
812 *****************************************************************************/
813static void ftmac100_get_drvinfo(struct net_device *netdev,
814 struct ethtool_drvinfo *info)
815{
816 strcpy(info->driver, DRV_NAME);
817 strcpy(info->version, DRV_VERSION);
818 strcpy(info->bus_info, dev_name(&netdev->dev));
819}
820
/* ethtool get_settings: report link settings via the generic MII layer. */
static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	return mii_ethtool_gset(&priv->mii, cmd);
}

/* ethtool set_settings: apply link settings via the generic MII layer. */
static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	return mii_ethtool_sset(&priv->mii, cmd);
}

/* ethtool nway_reset: restart PHY autonegotiation. */
static int ftmac100_nway_reset(struct net_device *netdev)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	return mii_nway_restart(&priv->mii);
}

/* ethtool get_link: current link state from the MII layer. */
static u32 ftmac100_get_link(struct net_device *netdev)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	return mii_link_ok(&priv->mii);
}
844
/* All ethtool operations are backed by the generic MII helpers above. */
static const struct ethtool_ops ftmac100_ethtool_ops = {
	.set_settings = ftmac100_set_settings,
	.get_settings = ftmac100_get_settings,
	.get_drvinfo = ftmac100_get_drvinfo,
	.nway_reset = ftmac100_nway_reset,
	.get_link = ftmac100_get_link,
};
852
853/******************************************************************************
854 * interrupt handler
855 *****************************************************************************/
/*
 * Hardware interrupt: mask all MAC interrupts and schedule NAPI.
 * ftmac100_poll() reads ISR to learn what happened and re-enables the
 * interrupts once it has completed all pending work.
 */
static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftmac100 *priv = netdev_priv(netdev);

	if (likely(netif_running(netdev))) {
		/* Disable interrupts for polling */
		ftmac100_disable_all_int(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
869
870/******************************************************************************
871 * struct napi_struct functions
872 *****************************************************************************/
/*
 * NAPI poll: service up to @budget received frames, reclaim finished TX
 * descriptors, and count/log error conditions.  Interrupts were masked
 * by ftmac100_interrupt(); they are re-enabled once all work is done.
 * Returns the number of frames delivered to the stack.
 */
static int ftmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
	struct net_device *netdev = priv->netdev;
	unsigned int status;
	bool completed = true;
	int rx = 0;

	status = ioread32(priv->base + FTMAC100_OFFSET_ISR);

	if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
		/*
		 * FTMAC100_INT_RPKT_FINISH:
		 * RX DMA has received packets into RX buffer successfully
		 *
		 * FTMAC100_INT_NORXBUF:
		 * RX buffer unavailable
		 */
		bool retry;

		do {
			retry = ftmac100_rx_packet(priv, &rx);
		} while (retry && rx < budget);

		/* budget exhausted with work left - keep polling */
		if (retry && rx == budget)
			completed = false;
	}

	if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
		/*
		 * FTMAC100_INT_XPKT_OK:
		 * packet transmitted to ethernet successfully
		 *
		 * FTMAC100_INT_XPKT_LOST:
		 * packet transmitted to ethernet lost due to late
		 * collision or excessive collision
		 */
		ftmac100_tx_complete(priv);
	}

	if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
		      FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
		if (net_ratelimit())
			netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
				    status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
				    status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
				    status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
				    status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");

		if (status & FTMAC100_INT_NORXBUF) {
			/* RX buffer unavailable */
			netdev->stats.rx_over_errors++;
		}

		if (status & FTMAC100_INT_RPKT_LOST) {
			/* received packet lost due to RX FIFO full */
			netdev->stats.rx_fifo_errors++;
		}

		if (status & FTMAC100_INT_PHYSTS_CHG) {
			/* PHY link status change */
			mii_check_link(&priv->mii);
		}
	}

	if (completed) {
		/* stop polling */
		napi_complete(napi);
		ftmac100_enable_all_int(priv);
	}

	return rx;
}
946
947/******************************************************************************
948 * struct net_device_ops functions
949 *****************************************************************************/
/*
 * net_device_ops: bring the interface up.
 *
 * Allocates descriptor rings and buffers, requests the interrupt,
 * resets the software ring indices, starts the MAC, then enables NAPI,
 * the TX queue and the chip interrupt sources.  Failures unwind via the
 * goto chain so each error path releases exactly what was acquired.
 *
 * Returns 0 on success or a negative errno.
 */
static int ftmac100_open(struct net_device *netdev)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	int err;

	err = ftmac100_alloc_buffers(priv);
	if (err) {
		netdev_err(netdev, "failed to allocate buffers\n");
		goto err_alloc;
	}

	err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", priv->irq);
		goto err_irq;
	}

	/* Reset software ring state before (re)starting the hardware. */
	priv->rx_pointer = 0;
	priv->tx_clean_pointer = 0;
	priv->tx_pointer = 0;
	priv->tx_pending = 0;

	err = ftmac100_start_hw(priv);
	if (err)
		goto err_hw;

	napi_enable(&priv->napi);
	netif_start_queue(netdev);

	ftmac100_enable_all_int(priv);

	return 0;

err_hw:
	free_irq(priv->irq, netdev);
err_irq:
	ftmac100_free_buffers(priv);
err_alloc:
	return err;
}
990
/*
 * net_device_ops: bring the interface down.
 *
 * Teardown mirrors ftmac100_open() in reverse order: mask interrupts,
 * stop the TX queue and NAPI, halt the MAC, then release the IRQ and
 * the descriptor/buffer memory.
 */
static int ftmac100_stop(struct net_device *netdev)
{
	struct ftmac100 *priv = netdev_priv(netdev);

	ftmac100_disable_all_int(priv);
	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	ftmac100_stop_hw(priv);
	free_irq(priv->irq, netdev);
	ftmac100_free_buffers(priv);

	return 0;
}
1004
1005static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1006{
1007 struct ftmac100 *priv = netdev_priv(netdev);
1008 dma_addr_t map;
1009
1010 if (unlikely(skb->len > MAX_PKT_SIZE)) {
1011 if (net_ratelimit())
1012 netdev_dbg(netdev, "tx packet too big\n");
1013
1014 netdev->stats.tx_dropped++;
1015 dev_kfree_skb(skb);
1016 return NETDEV_TX_OK;
1017 }
1018
1019 map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
1020 if (unlikely(dma_mapping_error(priv->dev, map))) {
1021 /* drop packet */
1022 if (net_ratelimit())
1023 netdev_err(netdev, "map socket buffer failed\n");
1024
1025 netdev->stats.tx_dropped++;
1026 dev_kfree_skb(skb);
1027 return NETDEV_TX_OK;
1028 }
1029
1030 return ftmac100_xmit(priv, skb, map);
1031}
1032
1033/* optional */
1034static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1035{
1036 struct ftmac100 *priv = netdev_priv(netdev);
1037 struct mii_ioctl_data *data = if_mii(ifr);
1038
1039 return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
1040}
1041
/*
 * net_device_ops: driver-specific open/stop/xmit/ioctl plus the generic
 * MAC-address set/validate helpers from the core.
 */
static const struct net_device_ops ftmac100_netdev_ops = {
	.ndo_open		= ftmac100_open,
	.ndo_stop		= ftmac100_stop,
	.ndo_start_xmit		= ftmac100_hard_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= ftmac100_do_ioctl,
};
1050
1051/******************************************************************************
1052 * struct platform_driver functions
1053 *****************************************************************************/
1054static int ftmac100_probe(struct platform_device *pdev)
1055{
1056 struct resource *res;
1057 int irq;
1058 struct net_device *netdev;
1059 struct ftmac100 *priv;
1060 int err;
1061
1062 if (!pdev)
1063 return -ENODEV;
1064
1065 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1066 if (!res)
1067 return -ENXIO;
1068
1069 irq = platform_get_irq(pdev, 0);
1070 if (irq < 0)
1071 return irq;
1072
1073 /* setup net_device */
1074 netdev = alloc_etherdev(sizeof(*priv));
1075 if (!netdev) {
1076 err = -ENOMEM;
1077 goto err_alloc_etherdev;
1078 }
1079
1080 SET_NETDEV_DEV(netdev, &pdev->dev);
1081 SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
1082 netdev->netdev_ops = &ftmac100_netdev_ops;
1083
1084 platform_set_drvdata(pdev, netdev);
1085
1086 /* setup private data */
1087 priv = netdev_priv(netdev);
1088 priv->netdev = netdev;
1089 priv->dev = &pdev->dev;
1090
1091 spin_lock_init(&priv->tx_lock);
1092
1093 /* initialize NAPI */
1094 netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
1095
1096 /* map io memory */
1097 priv->res = request_mem_region(res->start, resource_size(res),
1098 dev_name(&pdev->dev));
1099 if (!priv->res) {
1100 dev_err(&pdev->dev, "Could not reserve memory region\n");
1101 err = -ENOMEM;
1102 goto err_req_mem;
1103 }
1104
1105 priv->base = ioremap(res->start, res->end - res->start);
1106 if (!priv->base) {
1107 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1108 err = -EIO;
1109 goto err_ioremap;
1110 }
1111
1112 priv->irq = irq;
1113
1114 /* initialize struct mii_if_info */
1115 priv->mii.phy_id = 0;
1116 priv->mii.phy_id_mask = 0x1f;
1117 priv->mii.reg_num_mask = 0x1f;
1118 priv->mii.dev = netdev;
1119 priv->mii.mdio_read = ftmac100_mdio_read;
1120 priv->mii.mdio_write = ftmac100_mdio_write;
1121
1122 /* register network device */
1123 err = register_netdev(netdev);
1124 if (err) {
1125 dev_err(&pdev->dev, "Failed to register netdev\n");
1126 goto err_register_netdev;
1127 }
1128
1129 netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
1130
1131 if (!is_valid_ether_addr(netdev->dev_addr)) {
1132 random_ether_addr(netdev->dev_addr);
1133 netdev_info(netdev, "generated random MAC address %pM\n",
1134 netdev->dev_addr);
1135 }
1136
1137 return 0;
1138
1139err_register_netdev:
1140 iounmap(priv->base);
1141err_ioremap:
1142 release_resource(priv->res);
1143err_req_mem:
1144 netif_napi_del(&priv->napi);
1145 platform_set_drvdata(pdev, NULL);
1146 free_netdev(netdev);
1147err_alloc_etherdev:
1148 return err;
1149}
1150
1151static int __exit ftmac100_remove(struct platform_device *pdev)
1152{
1153 struct net_device *netdev;
1154 struct ftmac100 *priv;
1155
1156 netdev = platform_get_drvdata(pdev);
1157 priv = netdev_priv(netdev);
1158
1159 unregister_netdev(netdev);
1160
1161 iounmap(priv->base);
1162 release_resource(priv->res);
1163
1164 netif_napi_del(&priv->napi);
1165 platform_set_drvdata(pdev, NULL);
1166 free_netdev(netdev);
1167 return 0;
1168}
1169
/* Platform driver glue; DRV_NAME must match the platform device's name. */
static struct platform_driver ftmac100_driver = {
	.probe		= ftmac100_probe,
	.remove		= __exit_p(ftmac100_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
1178
1179/******************************************************************************
1180 * initialization / finalization
1181 *****************************************************************************/
/* Module init: announce the driver and register it with the platform bus. */
static int __init ftmac100_init(void)
{
	pr_info("Loading version " DRV_VERSION " ...\n");
	return platform_driver_register(&ftmac100_driver);
}
1187
/* Module exit: unregister from the platform bus. */
static void __exit ftmac100_exit(void)
{
	platform_driver_unregister(&ftmac100_driver);
}
1192
/* Standard module entry/exit hooks and metadata. */
module_init(ftmac100_init);
module_exit(ftmac100_exit);

MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTMAC100 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ftmac100.h b/drivers/net/ftmac100.h
new file mode 100644
index 000000000000..46a0c47b1ee1
--- /dev/null
+++ b/drivers/net/ftmac100.h
@@ -0,0 +1,180 @@
1/*
2 * Faraday FTMAC100 10/100 Ethernet
3 *
4 * (C) Copyright 2009-2011 Faraday Technology
5 * Po-Yu Chuang <ratbert@faraday-tech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef __FTMAC100_H
23#define __FTMAC100_H
24
25#define FTMAC100_OFFSET_ISR 0x00
26#define FTMAC100_OFFSET_IMR 0x04
27#define FTMAC100_OFFSET_MAC_MADR 0x08
28#define FTMAC100_OFFSET_MAC_LADR 0x0c
29#define FTMAC100_OFFSET_MAHT0 0x10
30#define FTMAC100_OFFSET_MAHT1 0x14
31#define FTMAC100_OFFSET_TXPD 0x18
32#define FTMAC100_OFFSET_RXPD 0x1c
33#define FTMAC100_OFFSET_TXR_BADR 0x20
34#define FTMAC100_OFFSET_RXR_BADR 0x24
35#define FTMAC100_OFFSET_ITC 0x28
36#define FTMAC100_OFFSET_APTC 0x2c
37#define FTMAC100_OFFSET_DBLAC 0x30
38#define FTMAC100_OFFSET_MACCR 0x88
39#define FTMAC100_OFFSET_MACSR 0x8c
40#define FTMAC100_OFFSET_PHYCR 0x90
41#define FTMAC100_OFFSET_PHYWDATA 0x94
42#define FTMAC100_OFFSET_FCR 0x98
43#define FTMAC100_OFFSET_BPR 0x9c
44#define FTMAC100_OFFSET_TS 0xc4
45#define FTMAC100_OFFSET_DMAFIFOS 0xc8
46#define FTMAC100_OFFSET_TM 0xcc
47#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4
48#define FTMAC100_OFFSET_RPF_AEP 0xd8
49#define FTMAC100_OFFSET_XM_PG 0xdc
50#define FTMAC100_OFFSET_RUNT_TLCC 0xe0
51#define FTMAC100_OFFSET_CRCER_FTL 0xe4
52#define FTMAC100_OFFSET_RLC_RCC 0xe8
53#define FTMAC100_OFFSET_BROC 0xec
54#define FTMAC100_OFFSET_MULCA 0xf0
55#define FTMAC100_OFFSET_RP 0xf4
56#define FTMAC100_OFFSET_XP 0xf8
57
58/*
59 * Interrupt status register & interrupt mask register
60 */
61#define FTMAC100_INT_RPKT_FINISH (1 << 0)
62#define FTMAC100_INT_NORXBUF (1 << 1)
63#define FTMAC100_INT_XPKT_FINISH (1 << 2)
64#define FTMAC100_INT_NOTXBUF (1 << 3)
65#define FTMAC100_INT_XPKT_OK (1 << 4)
66#define FTMAC100_INT_XPKT_LOST (1 << 5)
67#define FTMAC100_INT_RPKT_SAV (1 << 6)
68#define FTMAC100_INT_RPKT_LOST (1 << 7)
69#define FTMAC100_INT_AHB_ERR (1 << 8)
70#define FTMAC100_INT_PHYSTS_CHG (1 << 9)
71
72/*
73 * Interrupt timer control register
74 */
75#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
76#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
77#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7)
78#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
79#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
80#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15)
81
82/*
83 * Automatic polling timer control register
84 */
85#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
86#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
87#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
88#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
89
90/*
91 * DMA burst length and arbitration control register
92 */
93#define FTMAC100_DBLAC_INCR4_EN (1 << 0)
94#define FTMAC100_DBLAC_INCR8_EN (1 << 1)
95#define FTMAC100_DBLAC_INCR16_EN (1 << 2)
96#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3)
97#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6)
98#define FTMAC100_DBLAC_RX_THR_EN (1 << 9)
99
100/*
101 * MAC control register
102 */
103#define FTMAC100_MACCR_XDMA_EN (1 << 0)
104#define FTMAC100_MACCR_RDMA_EN (1 << 1)
105#define FTMAC100_MACCR_SW_RST (1 << 2)
106#define FTMAC100_MACCR_LOOP_EN (1 << 3)
107#define FTMAC100_MACCR_CRC_DIS (1 << 4)
108#define FTMAC100_MACCR_XMT_EN (1 << 5)
109#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6)
110#define FTMAC100_MACCR_RCV_EN (1 << 8)
111#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9)
112#define FTMAC100_MACCR_RX_RUNT (1 << 10)
113#define FTMAC100_MACCR_RX_FTL (1 << 11)
114#define FTMAC100_MACCR_RCV_ALL (1 << 12)
115#define FTMAC100_MACCR_CRC_APD (1 << 14)
116#define FTMAC100_MACCR_FULLDUP (1 << 15)
117#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16)
118#define FTMAC100_MACCR_RX_BROADPKT (1 << 17)
119
120/*
121 * PHY control register
122 */
123#define FTMAC100_PHYCR_MIIRDATA 0xffff
124#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
125#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
126#define FTMAC100_PHYCR_MIIRD (1 << 26)
127#define FTMAC100_PHYCR_MIIWR (1 << 27)
128
129/*
130 * PHY write data register
131 */
132#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff)
133
134/*
135 * Transmit descriptor, aligned to 16 bytes
136 */
137struct ftmac100_txdes {
138 unsigned int txdes0;
139 unsigned int txdes1;
140 unsigned int txdes2; /* TXBUF_BADR */
141 unsigned int txdes3; /* not used by HW */
142} __attribute__ ((aligned(16)));
143
144#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0)
145#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1)
146#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31)
147
148#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff)
149#define FTMAC100_TXDES1_LTS (1 << 27)
150#define FTMAC100_TXDES1_FTS (1 << 28)
151#define FTMAC100_TXDES1_TX2FIC (1 << 29)
152#define FTMAC100_TXDES1_TXIC (1 << 30)
153#define FTMAC100_TXDES1_EDOTR (1 << 31)
154
155/*
156 * Receive descriptor, aligned to 16 bytes
157 */
158struct ftmac100_rxdes {
159 unsigned int rxdes0;
160 unsigned int rxdes1;
161 unsigned int rxdes2; /* RXBUF_BADR */
162 unsigned int rxdes3; /* not used by HW */
163} __attribute__ ((aligned(16)));
164
165#define FTMAC100_RXDES0_RFL 0x7ff
166#define FTMAC100_RXDES0_MULTICAST (1 << 16)
167#define FTMAC100_RXDES0_BROADCAST (1 << 17)
168#define FTMAC100_RXDES0_RX_ERR (1 << 18)
169#define FTMAC100_RXDES0_CRC_ERR (1 << 19)
170#define FTMAC100_RXDES0_FTL (1 << 20)
171#define FTMAC100_RXDES0_RUNT (1 << 21)
172#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22)
173#define FTMAC100_RXDES0_LRS (1 << 28)
174#define FTMAC100_RXDES0_FRS (1 << 29)
175#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31)
176
177#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff)
178#define FTMAC100_RXDES1_EDORR (1 << 31)
179
180#endif /* __FTMAC100_H */
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323c5eb5..8931168d3e74 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
400static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) 400static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
401{ 401{
402 struct list_head *p; 402 struct list_head *p;
403 struct bpqdev *bpqdev = v;
403 404
404 ++*pos; 405 ++*pos;
405 406
406 if (v == SEQ_START_TOKEN) 407 if (v == SEQ_START_TOKEN)
407 p = rcu_dereference(bpq_devices.next); 408 p = rcu_dereference(list_next_rcu(&bpq_devices));
408 else 409 else
409 p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next); 410 p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
410 411
411 return (p == &bpq_devices) ? NULL 412 return (p == &bpq_devices) ? NULL
412 : list_entry(p, struct bpqdev, bpq_list); 413 : list_entry(p, struct bpqdev, bpq_list);
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0a2368fa6bc6..6b256c275e10 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -64,7 +64,14 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *);
64static s32 igb_read_mac_addr_82575(struct e1000_hw *); 64static s32 igb_read_mac_addr_82575(struct e1000_hw *);
65static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); 65static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
66static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); 66static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
67 67static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
68static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
69static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
70 u16 offset);
71static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
72 u16 offset);
73static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
74static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
68static const u16 e1000_82580_rxpbs_table[] = 75static const u16 e1000_82580_rxpbs_table[] =
69 { 36, 72, 144, 1, 2, 4, 8, 16, 76 { 36, 72, 144, 1, 2, 4, 8, 16,
70 35, 70, 140 }; 77 35, 70, 140 };
@@ -129,6 +136,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
129 break; 136 break;
130 case E1000_DEV_ID_82580_COPPER: 137 case E1000_DEV_ID_82580_COPPER:
131 case E1000_DEV_ID_82580_FIBER: 138 case E1000_DEV_ID_82580_FIBER:
139 case E1000_DEV_ID_82580_QUAD_FIBER:
132 case E1000_DEV_ID_82580_SERDES: 140 case E1000_DEV_ID_82580_SERDES:
133 case E1000_DEV_ID_82580_SGMII: 141 case E1000_DEV_ID_82580_SGMII:
134 case E1000_DEV_ID_82580_COPPER_DUAL: 142 case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -194,7 +202,11 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
194 mac->arc_subsystem_valid = 202 mac->arc_subsystem_valid =
195 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) 203 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
196 ? true : false; 204 ? true : false;
197 205 /* enable EEE on i350 parts */
206 if (mac->type == e1000_i350)
207 dev_spec->eee_disable = false;
208 else
209 dev_spec->eee_disable = true;
198 /* physical interface link setup */ 210 /* physical interface link setup */
199 mac->ops.setup_physical_interface = 211 mac->ops.setup_physical_interface =
200 (hw->phy.media_type == e1000_media_type_copper) 212 (hw->phy.media_type == e1000_media_type_copper)
@@ -232,14 +244,42 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
232 */ 244 */
233 size += NVM_WORD_SIZE_BASE_SHIFT; 245 size += NVM_WORD_SIZE_BASE_SHIFT;
234 246
235 /* EEPROM access above 16k is unsupported */
236 if (size > 14)
237 size = 14;
238 nvm->word_size = 1 << size; 247 nvm->word_size = 1 << size;
248 if (nvm->word_size == (1 << 15))
249 nvm->page_size = 128;
239 250
240 /* if 82576 then initialize mailbox parameters */ 251 /* NVM Function Pointers */
241 if (mac->type == e1000_82576) 252 nvm->ops.acquire = igb_acquire_nvm_82575;
253 if (nvm->word_size < (1 << 15))
254 nvm->ops.read = igb_read_nvm_eerd;
255 else
256 nvm->ops.read = igb_read_nvm_spi;
257
258 nvm->ops.release = igb_release_nvm_82575;
259 switch (hw->mac.type) {
260 case e1000_82580:
261 nvm->ops.validate = igb_validate_nvm_checksum_82580;
262 nvm->ops.update = igb_update_nvm_checksum_82580;
263 break;
264 case e1000_i350:
265 nvm->ops.validate = igb_validate_nvm_checksum_i350;
266 nvm->ops.update = igb_update_nvm_checksum_i350;
267 break;
268 default:
269 nvm->ops.validate = igb_validate_nvm_checksum;
270 nvm->ops.update = igb_update_nvm_checksum;
271 }
272 nvm->ops.write = igb_write_nvm_spi;
273
274 /* if part supports SR-IOV then initialize mailbox parameters */
275 switch (mac->type) {
276 case e1000_82576:
277 case e1000_i350:
242 igb_init_mbx_params_pf(hw); 278 igb_init_mbx_params_pf(hw);
279 break;
280 default:
281 break;
282 }
243 283
244 /* setup PHY parameters */ 284 /* setup PHY parameters */
245 if (phy->media_type != e1000_media_type_copper) { 285 if (phy->media_type != e1000_media_type_copper) {
@@ -1747,6 +1787,248 @@ u16 igb_rxpbs_adjust_82580(u32 data)
1747 return ret_val; 1787 return ret_val;
1748} 1788}
1749 1789
1790/**
1791 * igb_validate_nvm_checksum_with_offset - Validate EEPROM
1792 * checksum
1793 * @hw: pointer to the HW structure
1794 * @offset: offset in words of the checksum protected region
1795 *
1796 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
1797 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
1798 **/
1799s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1800{
1801 s32 ret_val = 0;
1802 u16 checksum = 0;
1803 u16 i, nvm_data;
1804
1805 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
1806 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1807 if (ret_val) {
1808 hw_dbg("NVM Read Error\n");
1809 goto out;
1810 }
1811 checksum += nvm_data;
1812 }
1813
1814 if (checksum != (u16) NVM_SUM) {
1815 hw_dbg("NVM Checksum Invalid\n");
1816 ret_val = -E1000_ERR_NVM;
1817 goto out;
1818 }
1819
1820out:
1821 return ret_val;
1822}
1823
1824/**
1825 * igb_update_nvm_checksum_with_offset - Update EEPROM
1826 * checksum
1827 * @hw: pointer to the HW structure
1828 * @offset: offset in words of the checksum protected region
1829 *
1830 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
1831 * up to the checksum. Then calculates the EEPROM checksum and writes the
1832 * value to the EEPROM.
1833 **/
1834s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1835{
1836 s32 ret_val;
1837 u16 checksum = 0;
1838 u16 i, nvm_data;
1839
1840 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
1841 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1842 if (ret_val) {
1843 hw_dbg("NVM Read Error while updating checksum.\n");
1844 goto out;
1845 }
1846 checksum += nvm_data;
1847 }
1848 checksum = (u16) NVM_SUM - checksum;
1849 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
1850 &checksum);
1851 if (ret_val)
1852 hw_dbg("NVM Write Error while updating checksum.\n");
1853
1854out:
1855 return ret_val;
1856}
1857
1858/**
1859 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
1860 * @hw: pointer to the HW structure
1861 *
1862 * Calculates the EEPROM section checksum by reading/adding each word of
1863 * the EEPROM and then verifies that the sum of the EEPROM is
1864 * equal to 0xBABA.
1865 **/
1866static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
1867{
1868 s32 ret_val = 0;
1869 u16 eeprom_regions_count = 1;
1870 u16 j, nvm_data;
1871 u16 nvm_offset;
1872
1873 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
1874 if (ret_val) {
1875 hw_dbg("NVM Read Error\n");
1876 goto out;
1877 }
1878
1879 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
1880 /* if chekcsums compatibility bit is set validate checksums
1881 * for all 4 ports. */
1882 eeprom_regions_count = 4;
1883 }
1884
1885 for (j = 0; j < eeprom_regions_count; j++) {
1886 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1887 ret_val = igb_validate_nvm_checksum_with_offset(hw,
1888 nvm_offset);
1889 if (ret_val != 0)
1890 goto out;
1891 }
1892
1893out:
1894 return ret_val;
1895}
1896
1897/**
1898 * igb_update_nvm_checksum_82580 - Update EEPROM checksum
1899 * @hw: pointer to the HW structure
1900 *
1901 * Updates the EEPROM section checksums for all 4 ports by reading/adding
1902 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
1903 * checksum and writes the value to the EEPROM.
1904 **/
1905static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
1906{
1907 s32 ret_val;
1908 u16 j, nvm_data;
1909 u16 nvm_offset;
1910
1911 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
1912 if (ret_val) {
1913 hw_dbg("NVM Read Error while updating checksum"
1914 " compatibility bit.\n");
1915 goto out;
1916 }
1917
1918 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
1919 /* set compatibility bit to validate checksums appropriately */
1920 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
1921 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
1922 &nvm_data);
1923 if (ret_val) {
1924 hw_dbg("NVM Write Error while updating checksum"
1925 " compatibility bit.\n");
1926 goto out;
1927 }
1928 }
1929
1930 for (j = 0; j < 4; j++) {
1931 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1932 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
1933 if (ret_val)
1934 goto out;
1935 }
1936
1937out:
1938 return ret_val;
1939}
1940
1941/**
1942 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
1943 * @hw: pointer to the HW structure
1944 *
1945 * Calculates the EEPROM section checksum by reading/adding each word of
1946 * the EEPROM and then verifies that the sum of the EEPROM is
1947 * equal to 0xBABA.
1948 **/
1949static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
1950{
1951 s32 ret_val = 0;
1952 u16 j;
1953 u16 nvm_offset;
1954
1955 for (j = 0; j < 4; j++) {
1956 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1957 ret_val = igb_validate_nvm_checksum_with_offset(hw,
1958 nvm_offset);
1959 if (ret_val != 0)
1960 goto out;
1961 }
1962
1963out:
1964 return ret_val;
1965}
1966
1967/**
1968 * igb_update_nvm_checksum_i350 - Update EEPROM checksum
1969 * @hw: pointer to the HW structure
1970 *
1971 * Updates the EEPROM section checksums for all 4 ports by reading/adding
1972 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
1973 * checksum and writes the value to the EEPROM.
1974 **/
1975static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
1976{
1977 s32 ret_val = 0;
1978 u16 j;
1979 u16 nvm_offset;
1980
1981 for (j = 0; j < 4; j++) {
1982 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
1983 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
1984 if (ret_val != 0)
1985 goto out;
1986 }
1987
1988out:
1989 return ret_val;
1990}
1991/**
1992 * igb_set_eee_i350 - Enable/disable EEE support
1993 * @hw: pointer to the HW structure
1994 *
1995 * Enable/disable EEE based on setting in dev_spec structure.
1996 *
1997 **/
1998s32 igb_set_eee_i350(struct e1000_hw *hw)
1999{
2000 s32 ret_val = 0;
2001 u32 ipcnfg, eeer, ctrl_ext;
2002
2003 ctrl_ext = rd32(E1000_CTRL_EXT);
2004 if ((hw->mac.type != e1000_i350) ||
2005 (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
2006 goto out;
2007 ipcnfg = rd32(E1000_IPCNFG);
2008 eeer = rd32(E1000_EEER);
2009
2010 /* enable or disable per user setting */
2011 if (!(hw->dev_spec._82575.eee_disable)) {
2012 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
2013 E1000_IPCNFG_EEE_100M_AN);
2014 eeer |= (E1000_EEER_TX_LPI_EN |
2015 E1000_EEER_RX_LPI_EN |
2016 E1000_EEER_LPI_FC);
2017
2018 } else {
2019 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2020 E1000_IPCNFG_EEE_100M_AN);
2021 eeer &= ~(E1000_EEER_TX_LPI_EN |
2022 E1000_EEER_RX_LPI_EN |
2023 E1000_EEER_LPI_FC);
2024 }
2025 wr32(E1000_IPCNFG, ipcnfg);
2026 wr32(E1000_EEER, eeer);
2027out:
2028
2029 return ret_val;
2030}
2031
1750static struct e1000_mac_operations e1000_mac_ops_82575 = { 2032static struct e1000_mac_operations e1000_mac_ops_82575 = {
1751 .init_hw = igb_init_hw_82575, 2033 .init_hw = igb_init_hw_82575,
1752 .check_for_link = igb_check_for_link_82575, 2034 .check_for_link = igb_check_for_link_82575,
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 1d01af2472e7..dd6df3498998 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -251,5 +251,6 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
251void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); 251void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
252void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 252void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
253u16 igb_rxpbs_adjust_82580(u32 data); 253u16 igb_rxpbs_adjust_82580(u32 data);
254s32 igb_set_eee_i350(struct e1000_hw *);
254 255
255#endif 256#endif
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6319ed902bc0..6b80d40110ca 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -51,6 +51,7 @@
51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
52#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 52#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
53#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 53#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
54#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
54#define E1000_CTRL_EXT_EIAME 0x01000000 55#define E1000_CTRL_EXT_EIAME 0x01000000
55#define E1000_CTRL_EXT_IRCA 0x00000001 56#define E1000_CTRL_EXT_IRCA 0x00000001
56/* Interrupt delay cancellation */ 57/* Interrupt delay cancellation */
@@ -110,6 +111,7 @@
110/* Management Control */ 111/* Management Control */
111#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ 112#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
112#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ 113#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
114#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
113/* Enable Neighbor Discovery Filtering */ 115/* Enable Neighbor Discovery Filtering */
114#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 116#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
115#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 117#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
@@ -286,7 +288,34 @@
286#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ 288#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
287#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 289#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
288 290
289/* Transmit Arbitration Count */ 291/* DMA Coalescing register fields */
292#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
293 * Watchdog Timer */
294#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
295 * Threshold */
296#define E1000_DMACR_DMACTHR_SHIFT 16
297#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
298 * transactions */
299#define E1000_DMACR_DMAC_LX_SHIFT 28
300#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
301
302#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
303 * Threshold */
304
305#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
306
307#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
308 * Threshold */
309#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
310 * current window */
311
312#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
313 * Current Cnt */
314
315#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
316 * High val */
317#define E1000_FCRTC_RTH_COAL_SHIFT 4
318#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
290 319
291/* SerDes Control */ 320/* SerDes Control */
292#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 321#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -565,6 +594,8 @@
565#define NVM_INIT_CONTROL3_PORT_A 0x0024 594#define NVM_INIT_CONTROL3_PORT_A 0x0024
566#define NVM_ALT_MAC_ADDR_PTR 0x0037 595#define NVM_ALT_MAC_ADDR_PTR 0x0037
567#define NVM_CHECKSUM_REG 0x003F 596#define NVM_CHECKSUM_REG 0x003F
597#define NVM_COMPATIBILITY_REG_3 0x0003
598#define NVM_COMPATIBILITY_BIT_MASK 0x8000
568 599
569#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ 600#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
570#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ 601#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
@@ -599,6 +630,7 @@
599/* NVM Commands - SPI */ 630/* NVM Commands - SPI */
600#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ 631#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
601#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ 632#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
633#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
602#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ 634#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
603#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ 635#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
604#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ 636#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
@@ -757,6 +789,17 @@
757#define E1000_MDIC_ERROR 0x40000000 789#define E1000_MDIC_ERROR 0x40000000
758#define E1000_MDIC_DEST 0x80000000 790#define E1000_MDIC_DEST 0x80000000
759 791
792/* Thermal Sensor */
793#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
794#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */
795
796/* Energy Efficient Ethernet */
797#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */
798#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
799#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
800#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
801#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
802
760/* SerDes Control */ 803/* SerDes Control */
761#define E1000_GEN_CTL_READY 0x80000000 804#define E1000_GEN_CTL_READY 0x80000000
762#define E1000_GEN_CTL_ADDRESS_SHIFT 8 805#define E1000_GEN_CTL_ADDRESS_SHIFT 8
@@ -770,4 +813,11 @@
770#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based 813#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
771 on DMA coal */ 814 on DMA coal */
772 815
816/* Tx Rate-Scheduler Config fields */
817#define E1000_RTTBCNRC_RS_ENA 0x80000000
818#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
819#define E1000_RTTBCNRC_RF_INT_SHIFT 14
820#define E1000_RTTBCNRC_RF_INT_MASK \
821 (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
822
773#endif 823#endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index e2638afb8cdc..27153e8d7b16 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,7 @@ struct e1000_hw;
54#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
55#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
57#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 58#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
58#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A 59#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
59#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C 60#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
@@ -247,6 +248,10 @@ struct e1000_hw_stats {
247 u64 scvpc; 248 u64 scvpc;
248 u64 hrmpc; 249 u64 hrmpc;
249 u64 doosync; 250 u64 doosync;
251 u64 o2bgptc;
252 u64 o2bspc;
253 u64 b2ospc;
254 u64 b2ogprc;
250}; 255};
251 256
252struct e1000_phy_stats { 257struct e1000_phy_stats {
@@ -331,6 +336,8 @@ struct e1000_nvm_operations {
331 s32 (*read)(struct e1000_hw *, u16, u16, u16 *); 336 s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
332 void (*release)(struct e1000_hw *); 337 void (*release)(struct e1000_hw *);
333 s32 (*write)(struct e1000_hw *, u16, u16, u16 *); 338 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
339 s32 (*update)(struct e1000_hw *);
340 s32 (*validate)(struct e1000_hw *);
334}; 341};
335 342
336struct e1000_info { 343struct e1000_info {
@@ -417,7 +424,6 @@ struct e1000_phy_info {
417 424
418struct e1000_nvm_info { 425struct e1000_nvm_info {
419 struct e1000_nvm_operations ops; 426 struct e1000_nvm_operations ops;
420
421 enum e1000_nvm_type type; 427 enum e1000_nvm_type type;
422 enum e1000_nvm_override override; 428 enum e1000_nvm_override override;
423 429
@@ -483,6 +489,7 @@ struct e1000_mbx_info {
483struct e1000_dev_spec_82575 { 489struct e1000_dev_spec_82575 {
484 bool sgmii_active; 490 bool sgmii_active;
485 bool global_device_reset; 491 bool global_device_reset;
492 bool eee_disable;
486}; 493};
487 494
488struct e1000_hw { 495struct e1000_hw {
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index c474cdb70047..78d48c7fa859 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
422{ 422{
423 struct e1000_mbx_info *mbx = &hw->mbx; 423 struct e1000_mbx_info *mbx = &hw->mbx;
424 424
425 if (hw->mac.type == e1000_82576) { 425 mbx->timeout = 0;
426 mbx->timeout = 0; 426 mbx->usec_delay = 0;
427 mbx->usec_delay = 0; 427
428 428 mbx->size = E1000_VFMAILBOX_SIZE;
429 mbx->size = E1000_VFMAILBOX_SIZE; 429
430 430 mbx->ops.read = igb_read_mbx_pf;
431 mbx->ops.read = igb_read_mbx_pf; 431 mbx->ops.write = igb_write_mbx_pf;
432 mbx->ops.write = igb_write_mbx_pf; 432 mbx->ops.read_posted = igb_read_posted_mbx;
433 mbx->ops.read_posted = igb_read_posted_mbx; 433 mbx->ops.write_posted = igb_write_posted_mbx;
434 mbx->ops.write_posted = igb_write_posted_mbx; 434 mbx->ops.check_for_msg = igb_check_for_msg_pf;
435 mbx->ops.check_for_msg = igb_check_for_msg_pf; 435 mbx->ops.check_for_ack = igb_check_for_ack_pf;
436 mbx->ops.check_for_ack = igb_check_for_ack_pf; 436 mbx->ops.check_for_rst = igb_check_for_rst_pf;
437 mbx->ops.check_for_rst = igb_check_for_rst_pf; 437
438 438 mbx->stats.msgs_tx = 0;
439 mbx->stats.msgs_tx = 0; 439 mbx->stats.msgs_rx = 0;
440 mbx->stats.msgs_rx = 0; 440 mbx->stats.reqs = 0;
441 mbx->stats.reqs = 0; 441 mbx->stats.acks = 0;
442 mbx->stats.acks = 0; 442 mbx->stats.rsts = 0;
443 mbx->stats.rsts = 0;
444 }
445 443
446 return 0; 444 return 0;
447} 445}
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index 6b5cc2cc453d..75bf36a4baee 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -318,6 +318,68 @@ out:
318} 318}
319 319
320/** 320/**
321 * igb_read_nvm_spi - Read EEPROM's using SPI
322 * @hw: pointer to the HW structure
323 * @offset: offset of word in the EEPROM to read
324 * @words: number of words to read
325 * @data: word read from the EEPROM
326 *
327 * Reads a 16 bit word from the EEPROM.
328 **/
329s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
330{
331 struct e1000_nvm_info *nvm = &hw->nvm;
332 u32 i = 0;
333 s32 ret_val;
334 u16 word_in;
335 u8 read_opcode = NVM_READ_OPCODE_SPI;
336
337 /*
338 * A check for invalid values: offset too large, too many words,
339 * and not enough words.
340 */
341 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
342 (words == 0)) {
343 hw_dbg("nvm parameter(s) out of bounds\n");
344 ret_val = -E1000_ERR_NVM;
345 goto out;
346 }
347
348 ret_val = nvm->ops.acquire(hw);
349 if (ret_val)
350 goto out;
351
352 ret_val = igb_ready_nvm_eeprom(hw);
353 if (ret_val)
354 goto release;
355
356 igb_standby_nvm(hw);
357
358 if ((nvm->address_bits == 8) && (offset >= 128))
359 read_opcode |= NVM_A8_OPCODE_SPI;
360
361 /* Send the READ command (opcode + addr) */
362 igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
363 igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
364
365 /*
366 * Read the data. SPI NVMs increment the address with each byte
367 * read and will roll over if reading beyond the end. This allows
368 * us to read the whole NVM from any offset
369 */
370 for (i = 0; i < words; i++) {
371 word_in = igb_shift_in_eec_bits(hw, 16);
372 data[i] = (word_in >> 8) | (word_in << 8);
373 }
374
375release:
376 nvm->ops.release(hw);
377
378out:
379 return ret_val;
380}
381
382/**
321 * igb_read_nvm_eerd - Reads EEPROM using EERD register 383 * igb_read_nvm_eerd - Reads EEPROM using EERD register
322 * @hw: pointer to the HW structure 384 * @hw: pointer to the HW structure
323 * @offset: offset of word in the EEPROM to read 385 * @offset: offset of word in the EEPROM to read
@@ -353,7 +415,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
353 break; 415 break;
354 416
355 data[i] = (rd32(E1000_EERD) >> 417 data[i] = (rd32(E1000_EERD) >>
356 E1000_NVM_RW_REG_DATA); 418 E1000_NVM_RW_REG_DATA);
357 } 419 }
358 420
359out: 421out:
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 29c956a84bd0..7f43564c4bcc 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -35,6 +35,7 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
35s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, 35s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
36 u32 part_num_size); 36 u32 part_num_size);
37s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 37s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
38s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
38s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 39s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
39s32 igb_validate_nvm_checksum(struct e1000_hw *hw); 40s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
40s32 igb_update_nvm_checksum(struct e1000_hw *hw); 41s32 igb_update_nvm_checksum(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 8ac83c5190d5..958ca3bda482 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -106,6 +106,19 @@
106 106
107#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) 107#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
108 108
109/* DMA Coalescing registers */
110#define E1000_DMACR 0x02508 /* Control Register */
111#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
112#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
113#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
114#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
115#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
116#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
117
118/* TX Rate Limit Registers */
119#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
120#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
121
109/* Split and Replication RX Control - RW */ 122/* Split and Replication RX Control - RW */
110#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ 123#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
111/* 124/*
@@ -324,4 +337,18 @@
324 337
325/* DMA Coalescing registers */ 338/* DMA Coalescing registers */
326#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ 339#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
340
341/* Energy Efficient Ethernet "EEE" register */
342#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
343#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
344
345/* Thermal Sensor Register */
346#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
347
348/* OS2BMC Registers */
349#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
350#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
351#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
352#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
353
327#endif 354#endif
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 92a4ef09e55c..1c687e298d5e 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -77,6 +77,7 @@ struct vf_data_storage {
77 unsigned long last_nack; 77 unsigned long last_nack;
78 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 78 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
79 u16 pf_qos; 79 u16 pf_qos;
80 u16 tx_rate;
80}; 81};
81 82
82#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ 83#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -323,6 +324,7 @@ struct igb_adapter {
323 u16 rx_ring_count; 324 u16 rx_ring_count;
324 unsigned int vfs_allocated_count; 325 unsigned int vfs_allocated_count;
325 struct vf_data_storage *vf_data; 326 struct vf_data_storage *vf_data;
327 int vf_rate_link_speed;
326 u32 rss_queues; 328 u32 rss_queues;
327 u32 wvbr; 329 u32 wvbr;
328}; 330};
@@ -331,6 +333,12 @@ struct igb_adapter {
331#define IGB_FLAG_DCA_ENABLED (1 << 1) 333#define IGB_FLAG_DCA_ENABLED (1 << 1)
332#define IGB_FLAG_QUAD_PORT_A (1 << 2) 334#define IGB_FLAG_QUAD_PORT_A (1 << 2)
333#define IGB_FLAG_QUEUE_PAIRS (1 << 3) 335#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
336#define IGB_FLAG_DMAC (1 << 4)
337
338/* DMA Coalescing defines */
339#define IGB_MIN_TXPBSIZE 20408
340#define IGB_TX_BUF_4096 4096
341#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
334 342
335#define IGB_82576_TSYNC_SHIFT 19 343#define IGB_82576_TSYNC_SHIFT 19
336#define IGB_82580_TSYNC_SHIFT 24 344#define IGB_82580_TSYNC_SHIFT 24
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a70e16bcfa7e..d976733bbcc2 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -86,6 +86,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
86 IGB_STAT("tx_smbus", stats.mgptc), 86 IGB_STAT("tx_smbus", stats.mgptc),
87 IGB_STAT("rx_smbus", stats.mgprc), 87 IGB_STAT("rx_smbus", stats.mgprc),
88 IGB_STAT("dropped_smbus", stats.mgpdc), 88 IGB_STAT("dropped_smbus", stats.mgpdc),
89 IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
90 IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
91 IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
92 IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
89}; 93};
90 94
91#define IGB_NETDEV_STAT(_net_stat) { \ 95#define IGB_NETDEV_STAT(_net_stat) { \
@@ -603,7 +607,10 @@ static void igb_get_regs(struct net_device *netdev,
603 regs_buff[548] = rd32(E1000_TDFT); 607 regs_buff[548] = rd32(E1000_TDFT);
604 regs_buff[549] = rd32(E1000_TDFHS); 608 regs_buff[549] = rd32(E1000_TDFHS);
605 regs_buff[550] = rd32(E1000_TDFPC); 609 regs_buff[550] = rd32(E1000_TDFPC);
606 610 regs_buff[551] = adapter->stats.o2bgptc;
611 regs_buff[552] = adapter->stats.b2ospc;
612 regs_buff[553] = adapter->stats.o2bspc;
613 regs_buff[554] = adapter->stats.b2ogprc;
607} 614}
608 615
609static int igb_get_eeprom_len(struct net_device *netdev) 616static int igb_get_eeprom_len(struct net_device *netdev)
@@ -714,7 +721,7 @@ static int igb_set_eeprom(struct net_device *netdev,
714 /* Update the checksum over the first part of the EEPROM if needed 721 /* Update the checksum over the first part of the EEPROM if needed
715 * and flush shadow RAM for 82573 controllers */ 722 * and flush shadow RAM for 82573 controllers */
716 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 723 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
717 igb_update_nvm_checksum(hw); 724 hw->nvm.ops.update(hw);
718 725
719 kfree(eeprom_buff); 726 kfree(eeprom_buff);
720 return ret_val; 727 return ret_val;
@@ -727,8 +734,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
727 char firmware_version[32]; 734 char firmware_version[32];
728 u16 eeprom_data; 735 u16 eeprom_data;
729 736
730 strncpy(drvinfo->driver, igb_driver_name, 32); 737 strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
731 strncpy(drvinfo->version, igb_driver_version, 32); 738 strncpy(drvinfo->version, igb_driver_version,
739 sizeof(drvinfo->version) - 1);
732 740
733 /* EEPROM image version # is reported as firmware version # for 741 /* EEPROM image version # is reported as firmware version # for
734 * 82575 controllers */ 742 * 82575 controllers */
@@ -738,8 +746,10 @@ static void igb_get_drvinfo(struct net_device *netdev,
738 (eeprom_data & 0x0FF0) >> 4, 746 (eeprom_data & 0x0FF0) >> 4,
739 eeprom_data & 0x000F); 747 eeprom_data & 0x000F);
740 748
741 strncpy(drvinfo->fw_version, firmware_version, 32); 749 strncpy(drvinfo->fw_version, firmware_version,
742 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 750 sizeof(drvinfo->fw_version) - 1);
751 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
752 sizeof(drvinfo->bus_info) - 1);
743 drvinfo->n_stats = IGB_STATS_LEN; 753 drvinfo->n_stats = IGB_STATS_LEN;
744 drvinfo->testinfo_len = IGB_TEST_LEN; 754 drvinfo->testinfo_len = IGB_TEST_LEN;
745 drvinfo->regdump_len = igb_get_regs_len(netdev); 755 drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -1070,7 +1080,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1070 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1080 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1071 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 1081 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
1072 wr32(reg, (_test[pat] & write)); 1082 wr32(reg, (_test[pat] & write));
1073 val = rd32(reg); 1083 val = rd32(reg) & mask;
1074 if (val != (_test[pat] & write & mask)) { 1084 if (val != (_test[pat] & write & mask)) {
1075 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 1085 dev_err(&adapter->pdev->dev, "pattern test reg %04X "
1076 "failed: got 0x%08X expected 0x%08X\n", 1086 "failed: got 0x%08X expected 0x%08X\n",
@@ -1999,6 +2009,12 @@ static int igb_set_coalesce(struct net_device *netdev,
1999 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) 2009 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
2000 return -EINVAL; 2010 return -EINVAL;
2001 2011
2012 /* If ITR is disabled, disable DMAC */
2013 if (ec->rx_coalesce_usecs == 0) {
2014 if (adapter->flags & IGB_FLAG_DMAC)
2015 adapter->flags &= ~IGB_FLAG_DMAC;
2016 }
2017
2002 /* convert to rate of irq's per second */ 2018 /* convert to rate of irq's per second */
2003 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) 2019 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
2004 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2020 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 58c665b7513d..3d850af0cdda 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -50,12 +50,17 @@
50#endif 50#endif
51#include "igb.h" 51#include "igb.h"
52 52
53#define DRV_VERSION "2.1.0-k2" 53#define MAJ 3
54#define MIN 0
55#define BUILD 6
56#define KFIX 2
57#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
58__stringify(BUILD) "-k" __stringify(KFIX)
54char igb_driver_name[] = "igb"; 59char igb_driver_name[] = "igb";
55char igb_driver_version[] = DRV_VERSION; 60char igb_driver_version[] = DRV_VERSION;
56static const char igb_driver_string[] = 61static const char igb_driver_string[] =
57 "Intel(R) Gigabit Ethernet Network Driver"; 62 "Intel(R) Gigabit Ethernet Network Driver";
58static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation."; 63static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
59 64
60static const struct e1000_info *igb_info_tbl[] = { 65static const struct e1000_info *igb_info_tbl[] = {
61 [board_82575] = &e1000_82575_info, 66 [board_82575] = &e1000_82575_info,
@@ -68,6 +73,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, 78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, 79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -100,6 +106,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
100static void igb_setup_mrqc(struct igb_adapter *); 106static void igb_setup_mrqc(struct igb_adapter *);
101static int igb_probe(struct pci_dev *, const struct pci_device_id *); 107static int igb_probe(struct pci_dev *, const struct pci_device_id *);
102static void __devexit igb_remove(struct pci_dev *pdev); 108static void __devexit igb_remove(struct pci_dev *pdev);
109static void igb_init_hw_timer(struct igb_adapter *adapter);
103static int igb_sw_init(struct igb_adapter *); 110static int igb_sw_init(struct igb_adapter *);
104static int igb_open(struct net_device *); 111static int igb_open(struct net_device *);
105static int igb_close(struct net_device *); 112static int igb_close(struct net_device *);
@@ -149,6 +156,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
149static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 156static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
150static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, 157static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
151 struct ifla_vf_info *ivi); 158 struct ifla_vf_info *ivi);
159static void igb_check_vf_rate_limit(struct igb_adapter *);
152 160
153#ifdef CONFIG_PM 161#ifdef CONFIG_PM
154static int igb_suspend(struct pci_dev *, pm_message_t); 162static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -1672,7 +1680,58 @@ void igb_reset(struct igb_adapter *adapter)
1672 1680
1673 if (hw->mac.ops.init_hw(hw)) 1681 if (hw->mac.ops.init_hw(hw))
1674 dev_err(&pdev->dev, "Hardware Error\n"); 1682 dev_err(&pdev->dev, "Hardware Error\n");
1683 if (hw->mac.type > e1000_82580) {
1684 if (adapter->flags & IGB_FLAG_DMAC) {
1685 u32 reg;
1675 1686
1687 /*
1688 * DMA Coalescing high water mark needs to be higher
1689 * than * the * Rx threshold. The Rx threshold is
1690 * currently * pba - 6, so we * should use a high water
1691 * mark of pba * - 4. */
1692 hwm = (pba - 4) << 10;
1693
1694 reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
1695 & E1000_DMACR_DMACTHR_MASK);
1696
1697 /* transition to L0x or L1 if available..*/
1698 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1699
1700 /* watchdog timer= +-1000 usec in 32usec intervals */
1701 reg |= (1000 >> 5);
1702 wr32(E1000_DMACR, reg);
1703
1704 /* no lower threshold to disable coalescing(smart fifb)
1705 * -UTRESH=0*/
1706 wr32(E1000_DMCRTRH, 0);
1707
1708 /* set hwm to PBA - 2 * max frame size */
1709 wr32(E1000_FCRTC, hwm);
1710
1711 /*
1712 * This sets the time to wait before requesting tran-
1713 * sition to * low power state to number of usecs needed
1714 * to receive 1 512 * byte frame at gigabit line rate
1715 */
1716 reg = rd32(E1000_DMCTLX);
1717 reg |= IGB_DMCTLX_DCFLUSH_DIS;
1718
1719 /* Delay 255 usec before entering Lx state. */
1720 reg |= 0xFF;
1721 wr32(E1000_DMCTLX, reg);
1722
1723 /* free space in Tx packet buffer to wake from DMAC */
1724 wr32(E1000_DMCTXTH,
1725 (IGB_MIN_TXPBSIZE -
1726 (IGB_TX_BUF_4096 + adapter->max_frame_size))
1727 >> 6);
1728
1729 /* make low power state decision controlled by DMAC */
1730 reg = rd32(E1000_PCIEMISC);
1731 reg |= E1000_PCIEMISC_LX_DECISION;
1732 wr32(E1000_PCIEMISC, reg);
1733 } /* end if IGB_FLAG_DMAC set */
1734 }
1676 if (hw->mac.type == e1000_82580) { 1735 if (hw->mac.type == e1000_82580) {
1677 u32 reg = rd32(E1000_PCIEMISC); 1736 u32 reg = rd32(E1000_PCIEMISC);
1678 wr32(E1000_PCIEMISC, 1737 wr32(E1000_PCIEMISC,
@@ -1882,7 +1941,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1882 hw->mac.ops.reset_hw(hw); 1941 hw->mac.ops.reset_hw(hw);
1883 1942
1884 /* make sure the NVM is good */ 1943 /* make sure the NVM is good */
1885 if (igb_validate_nvm_checksum(hw) < 0) { 1944 if (hw->nvm.ops.validate(hw) < 0) {
1886 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 1945 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1887 err = -EIO; 1946 err = -EIO;
1888 goto err_eeprom; 1947 goto err_eeprom;
@@ -1990,6 +2049,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1990 } 2049 }
1991 2050
1992#endif 2051#endif
2052 /* do hw tstamp init after resetting */
2053 igb_init_hw_timer(adapter);
2054
1993 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2055 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1994 /* print bus type/speed/width info */ 2056 /* print bus type/speed/width info */
1995 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2057 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2012,7 +2074,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2012 adapter->msix_entries ? "MSI-X" : 2074 adapter->msix_entries ? "MSI-X" :
2013 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", 2075 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2014 adapter->num_rx_queues, adapter->num_tx_queues); 2076 adapter->num_rx_queues, adapter->num_tx_queues);
2015 2077 switch (hw->mac.type) {
2078 case e1000_i350:
2079 igb_set_eee_i350(hw);
2080 break;
2081 default:
2082 break;
2083 }
2016 return 0; 2084 return 0;
2017 2085
2018err_register: 2086err_register:
@@ -2149,6 +2217,9 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2149 random_ether_addr(mac_addr); 2217 random_ether_addr(mac_addr);
2150 igb_set_vf_mac(adapter, i, mac_addr); 2218 igb_set_vf_mac(adapter, i, mac_addr);
2151 } 2219 }
2220 /* DMA Coalescing is not supported in IOV mode. */
2221 if (adapter->flags & IGB_FLAG_DMAC)
2222 adapter->flags &= ~IGB_FLAG_DMAC;
2152 } 2223 }
2153#endif /* CONFIG_PCI_IOV */ 2224#endif /* CONFIG_PCI_IOV */
2154} 2225}
@@ -2286,9 +2357,19 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2286 2357
2287 spin_lock_init(&adapter->stats64_lock); 2358 spin_lock_init(&adapter->stats64_lock);
2288#ifdef CONFIG_PCI_IOV 2359#ifdef CONFIG_PCI_IOV
2289 if (hw->mac.type == e1000_82576) 2360 switch (hw->mac.type) {
2290 adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs; 2361 case e1000_82576:
2291 2362 case e1000_i350:
2363 if (max_vfs > 7) {
2364 dev_warn(&pdev->dev,
2365 "Maximum of 7 VFs per PF, using max\n");
2366 adapter->vfs_allocated_count = 7;
2367 } else
2368 adapter->vfs_allocated_count = max_vfs;
2369 break;
2370 default:
2371 break;
2372 }
2292#endif /* CONFIG_PCI_IOV */ 2373#endif /* CONFIG_PCI_IOV */
2293 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 2374 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
2294 2375
@@ -2307,12 +2388,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2307 return -ENOMEM; 2388 return -ENOMEM;
2308 } 2389 }
2309 2390
2310 igb_init_hw_timer(adapter);
2311 igb_probe_vfs(adapter); 2391 igb_probe_vfs(adapter);
2312 2392
2313 /* Explicitly disable IRQ since the NIC can be in any state. */ 2393 /* Explicitly disable IRQ since the NIC can be in any state. */
2314 igb_irq_disable(adapter); 2394 igb_irq_disable(adapter);
2315 2395
2396 if (hw->mac.type == e1000_i350)
2397 adapter->flags &= ~IGB_FLAG_DMAC;
2398
2316 set_bit(__IGB_DOWN, &adapter->state); 2399 set_bit(__IGB_DOWN, &adapter->state);
2317 return 0; 2400 return 0;
2318} 2401}
@@ -3467,7 +3550,7 @@ static void igb_watchdog_task(struct work_struct *work)
3467 watchdog_task); 3550 watchdog_task);
3468 struct e1000_hw *hw = &adapter->hw; 3551 struct e1000_hw *hw = &adapter->hw;
3469 struct net_device *netdev = adapter->netdev; 3552 struct net_device *netdev = adapter->netdev;
3470 u32 link; 3553 u32 link, ctrl_ext, thstat;
3471 int i; 3554 int i;
3472 3555
3473 link = igb_has_link(adapter); 3556 link = igb_has_link(adapter);
@@ -3491,6 +3574,25 @@ static void igb_watchdog_task(struct work_struct *work)
3491 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 3574 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3492 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); 3575 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3493 3576
3577 /* check for thermal sensor event on i350,
3578 * copper only */
3579 if (hw->mac.type == e1000_i350) {
3580 thstat = rd32(E1000_THSTAT);
3581 ctrl_ext = rd32(E1000_CTRL_EXT);
3582 if ((hw->phy.media_type ==
3583 e1000_media_type_copper) && !(ctrl_ext &
3584 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3585 if (thstat &
3586 E1000_THSTAT_LINK_THROTTLE) {
3587 printk(KERN_INFO "igb: %s The "
3588 "network adapter link "
3589 "speed was downshifted "
3590 "because it "
3591 "overheated.\n",
3592 netdev->name);
3593 }
3594 }
3595 }
3494 /* adjust timeout factor according to speed/duplex */ 3596 /* adjust timeout factor according to speed/duplex */
3495 adapter->tx_timeout_factor = 1; 3597 adapter->tx_timeout_factor = 1;
3496 switch (adapter->link_speed) { 3598 switch (adapter->link_speed) {
@@ -3505,6 +3607,7 @@ static void igb_watchdog_task(struct work_struct *work)
3505 netif_carrier_on(netdev); 3607 netif_carrier_on(netdev);
3506 3608
3507 igb_ping_all_vfs(adapter); 3609 igb_ping_all_vfs(adapter);
3610 igb_check_vf_rate_limit(adapter);
3508 3611
3509 /* link state has changed, schedule phy info update */ 3612 /* link state has changed, schedule phy info update */
3510 if (!test_bit(__IGB_DOWN, &adapter->state)) 3613 if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -3515,6 +3618,22 @@ static void igb_watchdog_task(struct work_struct *work)
3515 if (netif_carrier_ok(netdev)) { 3618 if (netif_carrier_ok(netdev)) {
3516 adapter->link_speed = 0; 3619 adapter->link_speed = 0;
3517 adapter->link_duplex = 0; 3620 adapter->link_duplex = 0;
3621 /* check for thermal sensor event on i350
3622 * copper only*/
3623 if (hw->mac.type == e1000_i350) {
3624 thstat = rd32(E1000_THSTAT);
3625 ctrl_ext = rd32(E1000_CTRL_EXT);
3626 if ((hw->phy.media_type ==
3627 e1000_media_type_copper) && !(ctrl_ext &
3628 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3629 if (thstat & E1000_THSTAT_PWR_DOWN) {
3630 printk(KERN_ERR "igb: %s The "
3631 "network adapter was stopped "
3632 "because it overheated.\n",
3633 netdev->name);
3634 }
3635 }
3636 }
3518 /* Links status message must follow this format */ 3637 /* Links status message must follow this format */
3519 printk(KERN_INFO "igb: %s NIC Link is Down\n", 3638 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3520 netdev->name); 3639 netdev->name);
@@ -4547,6 +4666,15 @@ void igb_update_stats(struct igb_adapter *adapter,
4547 adapter->stats.mgptc += rd32(E1000_MGTPTC); 4666 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4548 adapter->stats.mgprc += rd32(E1000_MGTPRC); 4667 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4549 adapter->stats.mgpdc += rd32(E1000_MGTPDC); 4668 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4669
4670 /* OS2BMC Stats */
4671 reg = rd32(E1000_MANC);
4672 if (reg & E1000_MANC_EN_BMC2OS) {
4673 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4674 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4675 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4676 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4677 }
4550} 4678}
4551 4679
4552static irqreturn_t igb_msix_other(int irq, void *data) 4680static irqreturn_t igb_msix_other(int irq, void *data)
@@ -6593,9 +6721,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6593 return igb_set_vf_mac(adapter, vf, mac); 6721 return igb_set_vf_mac(adapter, vf, mac);
6594} 6722}
6595 6723
6724static int igb_link_mbps(int internal_link_speed)
6725{
6726 switch (internal_link_speed) {
6727 case SPEED_100:
6728 return 100;
6729 case SPEED_1000:
6730 return 1000;
6731 default:
6732 return 0;
6733 }
6734}
6735
6736static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6737 int link_speed)
6738{
6739 int rf_dec, rf_int;
6740 u32 bcnrc_val;
6741
6742 if (tx_rate != 0) {
6743 /* Calculate the rate factor values to set */
6744 rf_int = link_speed / tx_rate;
6745 rf_dec = (link_speed - (rf_int * tx_rate));
6746 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
6747
6748 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6749 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6750 E1000_RTTBCNRC_RF_INT_MASK);
6751 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6752 } else {
6753 bcnrc_val = 0;
6754 }
6755
6756 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6757 wr32(E1000_RTTBCNRC, bcnrc_val);
6758}
6759
6760static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6761{
6762 int actual_link_speed, i;
6763 bool reset_rate = false;
6764
6765 /* VF TX rate limit was not set or not supported */
6766 if ((adapter->vf_rate_link_speed == 0) ||
6767 (adapter->hw.mac.type != e1000_82576))
6768 return;
6769
6770 actual_link_speed = igb_link_mbps(adapter->link_speed);
6771 if (actual_link_speed != adapter->vf_rate_link_speed) {
6772 reset_rate = true;
6773 adapter->vf_rate_link_speed = 0;
6774 dev_info(&adapter->pdev->dev,
6775 "Link speed has been changed. VF Transmit "
6776 "rate is disabled\n");
6777 }
6778
6779 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6780 if (reset_rate)
6781 adapter->vf_data[i].tx_rate = 0;
6782
6783 igb_set_vf_rate_limit(&adapter->hw, i,
6784 adapter->vf_data[i].tx_rate,
6785 actual_link_speed);
6786 }
6787}
6788
6596static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 6789static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6597{ 6790{
6598 return -EOPNOTSUPP; 6791 struct igb_adapter *adapter = netdev_priv(netdev);
6792 struct e1000_hw *hw = &adapter->hw;
6793 int actual_link_speed;
6794
6795 if (hw->mac.type != e1000_82576)
6796 return -EOPNOTSUPP;
6797
6798 actual_link_speed = igb_link_mbps(adapter->link_speed);
6799 if ((vf >= adapter->vfs_allocated_count) ||
6800 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6801 (tx_rate < 0) || (tx_rate > actual_link_speed))
6802 return -EINVAL;
6803
6804 adapter->vf_rate_link_speed = actual_link_speed;
6805 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6806 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6807
6808 return 0;
6599} 6809}
6600 6810
6601static int igb_ndo_get_vf_config(struct net_device *netdev, 6811static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -6606,7 +6816,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
6606 return -EINVAL; 6816 return -EINVAL;
6607 ivi->vf = vf; 6817 ivi->vf = vf;
6608 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); 6818 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
6609 ivi->tx_rate = 0; 6819 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
6610 ivi->vlan = adapter->vf_data[vf].pf_vlan; 6820 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6611 ivi->qos = adapter->vf_data[vf].pf_qos; 6821 ivi->qos = adapter->vf_data[vf].pf_qos;
6612 return 0; 6822 return 0;
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index ed6e3d910247..1d943aa7c7a6 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -201,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev,
201 struct igbvf_adapter *adapter = netdev_priv(netdev); 201 struct igbvf_adapter *adapter = netdev_priv(netdev);
202 struct e1000_hw *hw = &adapter->hw; 202 struct e1000_hw *hw = &adapter->hw;
203 u32 *regs_buff = p; 203 u32 *regs_buff = p;
204 u8 revision_id;
205 204
206 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); 205 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
207 206
208 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); 207 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
209 208 adapter->pdev->device;
210 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
211 209
212 regs_buff[0] = er32(CTRL); 210 regs_buff[0] = er32(CTRL);
213 regs_buff[1] = er32(STATUS); 211 regs_buff[1] = er32(STATUS);
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 990c329e6c3b..d5dad5d607d6 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -201,9 +201,6 @@ struct igbvf_adapter {
201 unsigned int restart_queue; 201 unsigned int restart_queue;
202 u32 txd_cmd; 202 u32 txd_cmd;
203 203
204 bool detect_tx_hung;
205 u8 tx_timeout_factor;
206
207 u32 tx_int_delay; 204 u32 tx_int_delay;
208 u32 tx_abs_int_delay; 205 u32 tx_abs_int_delay;
209 206
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 6352c8158e6d..6ccc32fd7338 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -396,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
396 buffer_info->time_stamp = 0; 396 buffer_info->time_stamp = 0;
397} 397}
398 398
399static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
400{
401 struct igbvf_ring *tx_ring = adapter->tx_ring;
402 unsigned int i = tx_ring->next_to_clean;
403 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
404 union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
405
406 /* detected Tx unit hang */
407 dev_err(&adapter->pdev->dev,
408 "Detected Tx Unit Hang:\n"
409 " TDH <%x>\n"
410 " TDT <%x>\n"
411 " next_to_use <%x>\n"
412 " next_to_clean <%x>\n"
413 "buffer_info[next_to_clean]:\n"
414 " time_stamp <%lx>\n"
415 " next_to_watch <%x>\n"
416 " jiffies <%lx>\n"
417 " next_to_watch.status <%x>\n",
418 readl(adapter->hw.hw_addr + tx_ring->head),
419 readl(adapter->hw.hw_addr + tx_ring->tail),
420 tx_ring->next_to_use,
421 tx_ring->next_to_clean,
422 tx_ring->buffer_info[eop].time_stamp,
423 eop,
424 jiffies,
425 eop_desc->wb.status);
426}
427
428/** 399/**
429 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) 400 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
430 * @adapter: board private structure 401 * @adapter: board private structure
@@ -771,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
771static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) 742static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
772{ 743{
773 struct igbvf_adapter *adapter = tx_ring->adapter; 744 struct igbvf_adapter *adapter = tx_ring->adapter;
774 struct e1000_hw *hw = &adapter->hw;
775 struct net_device *netdev = adapter->netdev; 745 struct net_device *netdev = adapter->netdev;
776 struct igbvf_buffer *buffer_info; 746 struct igbvf_buffer *buffer_info;
777 struct sk_buff *skb; 747 struct sk_buff *skb;
@@ -832,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
832 } 802 }
833 } 803 }
834 804
835 if (adapter->detect_tx_hung) {
836 /* Detect a transmit hang in hardware, this serializes the
837 * check with the clearing of time_stamp and movement of i */
838 adapter->detect_tx_hung = false;
839 if (tx_ring->buffer_info[i].time_stamp &&
840 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
841 (adapter->tx_timeout_factor * HZ)) &&
842 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
843
844 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
845 /* detected Tx unit hang */
846 igbvf_print_tx_hang(adapter);
847
848 netif_stop_queue(netdev);
849 }
850 }
851 adapter->net_stats.tx_bytes += total_bytes; 805 adapter->net_stats.tx_bytes += total_bytes;
852 adapter->net_stats.tx_packets += total_packets; 806 adapter->net_stats.tx_packets += total_packets;
853 return count < tx_ring->count; 807 return count < tx_ring->count;
@@ -1863,17 +1817,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1863 &adapter->link_duplex); 1817 &adapter->link_duplex);
1864 igbvf_print_link_info(adapter); 1818 igbvf_print_link_info(adapter);
1865 1819
1866 /* adjust timeout factor according to speed/duplex */
1867 adapter->tx_timeout_factor = 1;
1868 switch (adapter->link_speed) {
1869 case SPEED_10:
1870 adapter->tx_timeout_factor = 16;
1871 break;
1872 case SPEED_100:
1873 /* maybe add some timeout factor ? */
1874 break;
1875 }
1876
1877 netif_carrier_on(netdev); 1820 netif_carrier_on(netdev);
1878 netif_wake_queue(netdev); 1821 netif_wake_queue(netdev);
1879 } 1822 }
@@ -1907,9 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1907 /* Cause software interrupt to ensure Rx ring is cleaned */ 1850 /* Cause software interrupt to ensure Rx ring is cleaned */
1908 ew32(EICS, adapter->rx_ring->eims_value); 1851 ew32(EICS, adapter->rx_ring->eims_value);
1909 1852
1910 /* Force detection of hung controller every watchdog period */
1911 adapter->detect_tx_hung = 1;
1912
1913 /* Reset the timer */ 1853 /* Reset the timer */
1914 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1854 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1915 mod_timer(&adapter->watchdog_timer, 1855 mod_timer(&adapter->watchdog_timer,
@@ -2699,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2699 hw->device_id = pdev->device; 2639 hw->device_id = pdev->device;
2700 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2640 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2701 hw->subsystem_device_id = pdev->subsystem_device; 2641 hw->subsystem_device_id = pdev->subsystem_device;
2702 2642 hw->revision_id = pdev->revision;
2703 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2704 2643
2705 err = -EIO; 2644 err = -EIO;
2706 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), 2645 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index aa93655c3aa7..a5b0f0e194bb 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev)
2025 2025
2026 if (phyaddr != 0x1f) { 2026 if (phyaddr != 0x1f) {
2027 u16 mii_phyctrl, mii_1000cr; 2027 u16 mii_phyctrl, mii_1000cr;
2028 u8 revisionid = 0;
2029 2028
2030 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); 2029 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2031 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | 2030 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
@@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev)
2035 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); 2034 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2036 2035
2037 /* Set default phyparam */ 2036 /* Set default phyparam */
2038 pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); 2037 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2039 ipg_set_phy_default_param(revisionid, dev, phyaddr);
2040 2038
2041 /* Reset PHY */ 2039 /* Reset PHY */
2042 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; 2040 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 521c0c732998..8f3df044e81e 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -149,7 +149,7 @@ struct ixgb_desc_ring {
149 149
150struct ixgb_adapter { 150struct ixgb_adapter {
151 struct timer_list watchdog_timer; 151 struct timer_list watchdog_timer;
152 struct vlan_group *vlgrp; 152 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
153 u32 bd_number; 153 u32 bd_number;
154 u32 rx_buffer_len; 154 u32 rx_buffer_len;
155 u32 part_num; 155 u32 part_num;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 43994c199991..cc53aa1541ba 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -706,6 +706,43 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
706 } 706 }
707} 707}
708 708
709static int ixgb_set_flags(struct net_device *netdev, u32 data)
710{
711 struct ixgb_adapter *adapter = netdev_priv(netdev);
712 bool need_reset;
713 int rc;
714
715 /*
716 * Tx VLAN insertion does not work per HW design when Rx stripping is
717 * disabled. Disable txvlan when rxvlan is turned off, and enable
718 * rxvlan when txvlan is turned on.
719 */
720 if (!(data & ETH_FLAG_RXVLAN) &&
721 (netdev->features & NETIF_F_HW_VLAN_TX))
722 data &= ~ETH_FLAG_TXVLAN;
723 else if (data & ETH_FLAG_TXVLAN)
724 data |= ETH_FLAG_RXVLAN;
725
726 need_reset = (data & ETH_FLAG_RXVLAN) !=
727 (netdev->features & NETIF_F_HW_VLAN_RX);
728
729 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
730 ETH_FLAG_TXVLAN);
731 if (rc)
732 return rc;
733
734 if (need_reset) {
735 if (netif_running(netdev)) {
736 ixgb_down(adapter, true);
737 ixgb_up(adapter);
738 ixgb_set_speed_duplex(netdev);
739 } else
740 ixgb_reset(adapter);
741 }
742
743 return 0;
744}
745
709static const struct ethtool_ops ixgb_ethtool_ops = { 746static const struct ethtool_ops ixgb_ethtool_ops = {
710 .get_settings = ixgb_get_settings, 747 .get_settings = ixgb_get_settings,
711 .set_settings = ixgb_set_settings, 748 .set_settings = ixgb_set_settings,
@@ -732,6 +769,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
732 .phys_id = ixgb_phys_id, 769 .phys_id = ixgb_phys_id,
733 .get_sset_count = ixgb_get_sset_count, 770 .get_sset_count = ixgb_get_sset_count,
734 .get_ethtool_stats = ixgb_get_ethtool_stats, 771 .get_ethtool_stats = ixgb_get_ethtool_stats,
772 .get_flags = ethtool_op_get_flags,
773 .set_flags = ixgb_set_flags,
735}; 774};
736 775
737void ixgb_set_ethtool_ops(struct net_device *netdev) 776void ixgb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 5639cccb4935..0f681ac2da8d 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -100,8 +100,6 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
100 100
101static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); 101static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
102static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); 102static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
103static void ixgb_vlan_rx_register(struct net_device *netdev,
104 struct vlan_group *grp);
105static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 103static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
106static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 104static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
107static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 105static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
@@ -336,7 +334,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
336 .ndo_set_mac_address = ixgb_set_mac, 334 .ndo_set_mac_address = ixgb_set_mac,
337 .ndo_change_mtu = ixgb_change_mtu, 335 .ndo_change_mtu = ixgb_change_mtu,
338 .ndo_tx_timeout = ixgb_tx_timeout, 336 .ndo_tx_timeout = ixgb_tx_timeout,
339 .ndo_vlan_rx_register = ixgb_vlan_rx_register,
340 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, 337 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
341 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, 338 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
342#ifdef CONFIG_NET_POLL_CONTROLLER 339#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1508,7 +1505,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1508 DESC_NEEDED))) 1505 DESC_NEEDED)))
1509 return NETDEV_TX_BUSY; 1506 return NETDEV_TX_BUSY;
1510 1507
1511 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 1508 if (vlan_tx_tag_present(skb)) {
1512 tx_flags |= IXGB_TX_FLAGS_VLAN; 1509 tx_flags |= IXGB_TX_FLAGS_VLAN;
1513 vlan_id = vlan_tx_tag_get(skb); 1510 vlan_id = vlan_tx_tag_get(skb);
1514 } 1511 }
@@ -2049,12 +2046,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
2049 ixgb_rx_checksum(adapter, rx_desc, skb); 2046 ixgb_rx_checksum(adapter, rx_desc, skb);
2050 2047
2051 skb->protocol = eth_type_trans(skb, netdev); 2048 skb->protocol = eth_type_trans(skb, netdev);
2052 if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { 2049 if (status & IXGB_RX_DESC_STATUS_VP)
2053 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2050 __vlan_hwaccel_put_tag(skb,
2054 le16_to_cpu(rx_desc->special)); 2051 le16_to_cpu(rx_desc->special));
2055 } else { 2052
2056 netif_receive_skb(skb); 2053 netif_receive_skb(skb);
2057 }
2058 2054
2059rxdesc_done: 2055rxdesc_done:
2060 /* clean up descriptor, might be written over by hw */ 2056 /* clean up descriptor, might be written over by hw */
@@ -2152,20 +2148,6 @@ map_skb:
2152 } 2148 }
2153} 2149}
2154 2150
2155/**
2156 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
2157 *
2158 * @param netdev network interface device structure
2159 * @param grp indicates to enable or disable tagging/stripping
2160 **/
2161static void
2162ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2163{
2164 struct ixgb_adapter *adapter = netdev_priv(netdev);
2165
2166 adapter->vlgrp = grp;
2167}
2168
2169static void 2151static void
2170ixgb_vlan_strip_enable(struct ixgb_adapter *adapter) 2152ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2171{ 2153{
@@ -2200,6 +2182,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2200 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); 2182 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2201 vfta |= (1 << (vid & 0x1F)); 2183 vfta |= (1 << (vid & 0x1F));
2202 ixgb_write_vfta(&adapter->hw, index, vfta); 2184 ixgb_write_vfta(&adapter->hw, index, vfta);
2185 set_bit(vid, adapter->active_vlans);
2203} 2186}
2204 2187
2205static void 2188static void
@@ -2208,35 +2191,22 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2208 struct ixgb_adapter *adapter = netdev_priv(netdev); 2191 struct ixgb_adapter *adapter = netdev_priv(netdev);
2209 u32 vfta, index; 2192 u32 vfta, index;
2210 2193
2211 ixgb_irq_disable(adapter);
2212
2213 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2214
2215 /* don't enable interrupts unless we are UP */
2216 if (adapter->netdev->flags & IFF_UP)
2217 ixgb_irq_enable(adapter);
2218
2219 /* remove VID from filter table */ 2194 /* remove VID from filter table */
2220 2195
2221 index = (vid >> 5) & 0x7F; 2196 index = (vid >> 5) & 0x7F;
2222 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); 2197 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2223 vfta &= ~(1 << (vid & 0x1F)); 2198 vfta &= ~(1 << (vid & 0x1F));
2224 ixgb_write_vfta(&adapter->hw, index, vfta); 2199 ixgb_write_vfta(&adapter->hw, index, vfta);
2200 clear_bit(vid, adapter->active_vlans);
2225} 2201}
2226 2202
2227static void 2203static void
2228ixgb_restore_vlan(struct ixgb_adapter *adapter) 2204ixgb_restore_vlan(struct ixgb_adapter *adapter)
2229{ 2205{
2230 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); 2206 u16 vid;
2231 2207
2232 if (adapter->vlgrp) { 2208 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2233 u16 vid; 2209 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2234 for (vid = 0; vid < VLAN_N_VID; vid++) {
2235 if (!vlan_group_get_device(adapter->vlgrp, vid))
2236 continue;
2237 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2238 }
2239 }
2240} 2210}
2241 2211
2242#ifdef CONFIG_NET_POLL_CONTROLLER 2212#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3b8c92463617..8d468028bb55 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -118,6 +118,7 @@ struct vf_data_storage {
118 bool pf_set_mac; 118 bool pf_set_mac;
119 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 119 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
120 u16 pf_qos; 120 u16 pf_qos;
121 u16 tx_rate;
121}; 122};
122 123
123/* wrapper around a pointer to a socket buffer, 124/* wrapper around a pointer to a socket buffer,
@@ -209,6 +210,7 @@ struct ixgbe_ring {
209 * associated with this ring, which is 210 * associated with this ring, which is
210 * different for DCB and RSS modes 211 * different for DCB and RSS modes
211 */ 212 */
213 u8 dcb_tc;
212 214
213 u16 work_limit; /* max work per interrupt */ 215 u16 work_limit; /* max work per interrupt */
214 216
@@ -243,7 +245,7 @@ enum ixgbe_ring_f_enum {
243 RING_F_ARRAY_SIZE /* must be last in enum set */ 245 RING_F_ARRAY_SIZE /* must be last in enum set */
244}; 246};
245 247
246#define IXGBE_MAX_DCB_INDICES 8 248#define IXGBE_MAX_DCB_INDICES 64
247#define IXGBE_MAX_RSS_INDICES 16 249#define IXGBE_MAX_RSS_INDICES 16
248#define IXGBE_MAX_VMDQ_INDICES 64 250#define IXGBE_MAX_VMDQ_INDICES 64
249#define IXGBE_MAX_FDIR_INDICES 64 251#define IXGBE_MAX_FDIR_INDICES 64
@@ -334,9 +336,14 @@ struct ixgbe_adapter {
334 u16 bd_number; 336 u16 bd_number;
335 struct work_struct reset_task; 337 struct work_struct reset_task;
336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 338 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
339
340 /* DCB parameters */
341 struct ieee_pfc *ixgbe_ieee_pfc;
342 struct ieee_ets *ixgbe_ieee_ets;
337 struct ixgbe_dcb_config dcb_cfg; 343 struct ixgbe_dcb_config dcb_cfg;
338 struct ixgbe_dcb_config temp_dcb_cfg; 344 struct ixgbe_dcb_config temp_dcb_cfg;
339 u8 dcb_set_bitmap; 345 u8 dcb_set_bitmap;
346 u8 dcbx_cap;
340 enum ixgbe_fc_mode last_lfc_mode; 347 enum ixgbe_fc_mode last_lfc_mode;
341 348
342 /* Interrupt Throttle Rate */ 349 /* Interrupt Throttle Rate */
@@ -462,6 +469,7 @@ struct ixgbe_adapter {
462 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 469 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
463 unsigned int num_vfs; 470 unsigned int num_vfs;
464 struct vf_data_storage *vfinfo; 471 struct vf_data_storage *vfinfo;
472 int vf_rate_link_speed;
465}; 473};
466 474
467enum ixbge_state_t { 475enum ixbge_state_t {
@@ -521,7 +529,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
521extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); 529extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
522extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 530extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
523extern int ethtool_ioctl(struct ifreq *ifr); 531extern int ethtool_ioctl(struct ifreq *ifr);
524extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
525extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 532extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
526extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 533extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
527extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 534extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -538,6 +545,7 @@ extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
538extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, 545extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
539 struct ixgbe_ring *ring); 546 struct ixgbe_ring *ring);
540extern void ixgbe_set_rx_mode(struct net_device *netdev); 547extern void ixgbe_set_rx_mode(struct net_device *netdev);
548extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
541#ifdef IXGBE_FCOE 549#ifdef IXGBE_FCOE
542extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 550extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
543extern int ixgbe_fso(struct ixgbe_adapter *adapter, 551extern int ixgbe_fso(struct ixgbe_adapter *adapter,
@@ -549,6 +557,8 @@ extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
549 struct sk_buff *skb); 557 struct sk_buff *skb);
550extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 558extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
551 struct scatterlist *sgl, unsigned int sgc); 559 struct scatterlist *sgl, unsigned int sgc);
560extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
561 struct scatterlist *sgl, unsigned int sgc);
552extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); 562extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
553extern int ixgbe_fcoe_enable(struct net_device *netdev); 563extern int ixgbe_fcoe_enable(struct net_device *netdev);
554extern int ixgbe_fcoe_disable(struct net_device *netdev); 564extern int ixgbe_fcoe_disable(struct net_device *netdev);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index d0f1d9d2c416..845c679c8b87 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -158,6 +158,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
158 158
159 switch (hw->phy.type) { 159 switch (hw->phy.type) {
160 case ixgbe_phy_tn: 160 case ixgbe_phy_tn:
161 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
161 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 162 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
162 phy->ops.get_firmware_version = 163 phy->ops.get_firmware_version =
163 &ixgbe_get_phy_firmware_version_tnx; 164 &ixgbe_get_phy_firmware_version_tnx;
@@ -280,10 +281,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
280{ 281{
281 enum ixgbe_media_type media_type; 282 enum ixgbe_media_type media_type;
282 283
284 /* Detect if there is a copper PHY attached. */
285 switch (hw->phy.type) {
286 case ixgbe_phy_cu_unknown:
287 case ixgbe_phy_tn:
288 case ixgbe_phy_aq:
289 media_type = ixgbe_media_type_copper;
290 goto out;
291 default:
292 break;
293 }
294
283 /* Media type for I82598 is based on device ID */ 295 /* Media type for I82598 is based on device ID */
284 switch (hw->device_id) { 296 switch (hw->device_id) {
285 case IXGBE_DEV_ID_82598: 297 case IXGBE_DEV_ID_82598:
286 case IXGBE_DEV_ID_82598_BX: 298 case IXGBE_DEV_ID_82598_BX:
299 /* Default device ID is mezzanine card KX/KX4 */
287 media_type = ixgbe_media_type_backplane; 300 media_type = ixgbe_media_type_backplane;
288 break; 301 break;
289 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 302 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
@@ -306,7 +319,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
306 media_type = ixgbe_media_type_unknown; 319 media_type = ixgbe_media_type_unknown;
307 break; 320 break;
308 } 321 }
309 322out:
310 return media_type; 323 return media_type;
311} 324}
312 325
@@ -354,7 +367,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
354 367
355 /* Negotiate the fc mode to use */ 368 /* Negotiate the fc mode to use */
356 ret_val = ixgbe_fc_autoneg(hw); 369 ret_val = ixgbe_fc_autoneg(hw);
357 if (ret_val) 370 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
358 goto out; 371 goto out;
359 372
360 /* Disable any previous flow control settings */ 373 /* Disable any previous flow control settings */
@@ -372,10 +385,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
372 * 2: Tx flow control is enabled (we can send pause frames but 385 * 2: Tx flow control is enabled (we can send pause frames but
373 * we do not support receiving pause frames). 386 * we do not support receiving pause frames).
374 * 3: Both Rx and Tx flow control (symmetric) are enabled. 387 * 3: Both Rx and Tx flow control (symmetric) are enabled.
375 * other: Invalid.
376#ifdef CONFIG_DCB 388#ifdef CONFIG_DCB
377 * 4: Priority Flow Control is enabled. 389 * 4: Priority Flow Control is enabled.
378#endif 390#endif
391 * other: Invalid.
379 */ 392 */
380 switch (hw->fc.current_mode) { 393 switch (hw->fc.current_mode) {
381 case ixgbe_fc_none: 394 case ixgbe_fc_none:
@@ -432,9 +445,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
432 reg = (rx_pba_size - hw->fc.low_water) << 6; 445 reg = (rx_pba_size - hw->fc.low_water) << 6;
433 if (hw->fc.send_xon) 446 if (hw->fc.send_xon)
434 reg |= IXGBE_FCRTL_XONE; 447 reg |= IXGBE_FCRTL_XONE;
448
435 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); 449 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
436 450
437 reg = (rx_pba_size - hw->fc.high_water) << 10; 451 reg = (rx_pba_size - hw->fc.high_water) << 6;
438 reg |= IXGBE_FCRTH_FCEN; 452 reg |= IXGBE_FCRTH_FCEN;
439 453
440 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); 454 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
@@ -627,13 +641,12 @@ out:
627 return 0; 641 return 0;
628} 642}
629 643
630
631/** 644/**
632 * ixgbe_setup_mac_link_82598 - Set MAC link speed 645 * ixgbe_setup_mac_link_82598 - Set MAC link speed
633 * @hw: pointer to hardware structure 646 * @hw: pointer to hardware structure
634 * @speed: new link speed 647 * @speed: new link speed
635 * @autoneg: true if auto-negotiation enabled 648 * @autoneg: true if auto-negotiation enabled
636 * @autoneg_wait_to_complete: true if waiting is needed to complete 649 * @autoneg_wait_to_complete: true when waiting for completion is needed
637 * 650 *
638 * Set the link speed in the AUTOC register and restarts link. 651 * Set the link speed in the AUTOC register and restarts link.
639 **/ 652 **/
@@ -672,7 +685,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
672 * ixgbe_hw This will write the AUTOC register based on the new 685 * ixgbe_hw This will write the AUTOC register based on the new
673 * stored values 686 * stored values
674 */ 687 */
675 status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 688 status = ixgbe_start_mac_link_82598(hw,
689 autoneg_wait_to_complete);
676 } 690 }
677 691
678 return status; 692 return status;
@@ -698,7 +712,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
698 /* Setup the PHY according to input speed */ 712 /* Setup the PHY according to input speed */
699 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 713 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
700 autoneg_wait_to_complete); 714 autoneg_wait_to_complete);
701
702 /* Set up MAC */ 715 /* Set up MAC */
703 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 716 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
704 717
@@ -770,7 +783,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
770 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) 783 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
771 goto no_phy_reset; 784 goto no_phy_reset;
772 785
773
774 hw->phy.ops.reset(hw); 786 hw->phy.ops.reset(hw);
775 } 787 }
776 788
@@ -779,12 +791,9 @@ no_phy_reset:
779 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 791 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
780 * access and verify no pending requests before reset 792 * access and verify no pending requests before reset
781 */ 793 */
782 status = ixgbe_disable_pcie_master(hw); 794 ixgbe_disable_pcie_master(hw);
783 if (status != 0) {
784 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
785 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
786 }
787 795
796mac_reset_top:
788 /* 797 /*
789 * Issue global reset to the MAC. This needs to be a SW reset. 798 * Issue global reset to the MAC. This needs to be a SW reset.
790 * If link reset is used, it might reset the MAC when mng is using it 799 * If link reset is used, it might reset the MAC when mng is using it
@@ -805,6 +814,19 @@ no_phy_reset:
805 hw_dbg(hw, "Reset polling failed to complete.\n"); 814 hw_dbg(hw, "Reset polling failed to complete.\n");
806 } 815 }
807 816
817 /*
818 * Double resets are required for recovery from certain error
819 * conditions. Between resets, it is necessary to stall to allow time
820 * for any pending HW events to complete. We use 1usec since that is
821 * what is needed for ixgbe_disable_pcie_master(). The second reset
822 * then clears out any effects of those events.
823 */
824 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
825 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
826 udelay(1);
827 goto mac_reset_top;
828 }
829
808 msleep(50); 830 msleep(50);
809 831
810 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); 832 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@@ -824,15 +846,15 @@ no_phy_reset:
824 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); 846 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
825 } 847 }
826 848
849 /* Store the permanent mac address */
850 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
851
827 /* 852 /*
828 * Store MAC address from RAR0, clear receive address registers, and 853 * Store MAC address from RAR0, clear receive address registers, and
829 * clear the multicast table 854 * clear the multicast table
830 */ 855 */
831 hw->mac.ops.init_rx_addrs(hw); 856 hw->mac.ops.init_rx_addrs(hw);
832 857
833 /* Store the permanent mac address */
834 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
835
836reset_hw_out: 858reset_hw_out:
837 if (phy_status) 859 if (phy_status)
838 status = phy_status; 860 status = phy_status;
@@ -849,6 +871,13 @@ reset_hw_out:
849static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 871static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
850{ 872{
851 u32 rar_high; 873 u32 rar_high;
874 u32 rar_entries = hw->mac.num_rar_entries;
875
876 /* Make sure we are using a valid rar index range */
877 if (rar >= rar_entries) {
878 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
879 return IXGBE_ERR_INVALID_ARGUMENT;
880 }
852 881
853 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 882 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
854 rar_high &= ~IXGBE_RAH_VIND_MASK; 883 rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -868,14 +897,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
868 u32 rar_high; 897 u32 rar_high;
869 u32 rar_entries = hw->mac.num_rar_entries; 898 u32 rar_entries = hw->mac.num_rar_entries;
870 899
871 if (rar < rar_entries) { 900
872 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 901 /* Make sure we are using a valid rar index range */
873 if (rar_high & IXGBE_RAH_VIND_MASK) { 902 if (rar >= rar_entries) {
874 rar_high &= ~IXGBE_RAH_VIND_MASK;
875 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
876 }
877 } else {
878 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 903 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
904 return IXGBE_ERR_INVALID_ARGUMENT;
905 }
906
907 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
908 if (rar_high & IXGBE_RAH_VIND_MASK) {
909 rar_high &= ~IXGBE_RAH_VIND_MASK;
910 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
879 } 911 }
880 912
881 return 0; 913 return 0;
@@ -994,13 +1026,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
994} 1026}
995 1027
996/** 1028/**
997 * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module 1029 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
998 * over I2C interface through an intermediate phy.
999 * @hw: pointer to hardware structure 1030 * @hw: pointer to hardware structure
1000 * @byte_offset: EEPROM byte offset to read 1031 * @byte_offset: EEPROM byte offset to read
1001 * @eeprom_data: value read 1032 * @eeprom_data: value read
1002 * 1033 *
1003 * Performs byte read operation to SFP module's EEPROM over I2C interface. 1034 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1004 **/ 1035 **/
1005static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 1036static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1006 u8 *eeprom_data) 1037 u8 *eeprom_data)
@@ -1074,10 +1105,12 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1074 1105
1075 /* Copper PHY must be checked before AUTOC LMS to determine correct 1106 /* Copper PHY must be checked before AUTOC LMS to determine correct
1076 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ 1107 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1077 if (hw->phy.type == ixgbe_phy_tn || 1108 switch (hw->phy.type) {
1078 hw->phy.type == ixgbe_phy_cu_unknown) { 1109 case ixgbe_phy_tn:
1079 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1110 case ixgbe_phy_aq:
1080 &ext_ability); 1111 case ixgbe_phy_cu_unknown:
1112 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
1113 MDIO_MMD_PMAPMD, &ext_ability);
1081 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1114 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
1082 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1115 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1083 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1116 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1085,6 +1118,8 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1085 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1118 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
1086 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1119 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1087 goto out; 1120 goto out;
1121 default:
1122 break;
1088 } 1123 }
1089 1124
1090 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1125 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1179,13 +1214,14 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1179 .set_vmdq = &ixgbe_set_vmdq_82598, 1214 .set_vmdq = &ixgbe_set_vmdq_82598,
1180 .clear_vmdq = &ixgbe_clear_vmdq_82598, 1215 .clear_vmdq = &ixgbe_clear_vmdq_82598,
1181 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 1216 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1182 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
1183 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 1217 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
1184 .enable_mc = &ixgbe_enable_mc_generic, 1218 .enable_mc = &ixgbe_enable_mc_generic,
1185 .disable_mc = &ixgbe_disable_mc_generic, 1219 .disable_mc = &ixgbe_disable_mc_generic,
1186 .clear_vfta = &ixgbe_clear_vfta_82598, 1220 .clear_vfta = &ixgbe_clear_vfta_82598,
1187 .set_vfta = &ixgbe_set_vfta_82598, 1221 .set_vfta = &ixgbe_set_vfta_82598,
1188 .fc_enable = &ixgbe_fc_enable_82598, 1222 .fc_enable = &ixgbe_fc_enable_82598,
1223 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1224 .release_swfw_sync = &ixgbe_release_swfw_sync,
1189}; 1225};
1190 1226
1191static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1227static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a21f5817685b..00aeba385a2f 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -112,7 +112,8 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
112 goto setup_sfp_out; 112 goto setup_sfp_out;
113 113
114 /* PHY config will finish before releasing the semaphore */ 114 /* PHY config will finish before releasing the semaphore */
115 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 115 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
116 IXGBE_GSSR_MAC_CSR_SM);
116 if (ret_val != 0) { 117 if (ret_val != 0) {
117 ret_val = IXGBE_ERR_SWFW_SYNC; 118 ret_val = IXGBE_ERR_SWFW_SYNC;
118 goto setup_sfp_out; 119 goto setup_sfp_out;
@@ -329,11 +330,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
329 enum ixgbe_media_type media_type; 330 enum ixgbe_media_type media_type;
330 331
331 /* Detect if there is a copper PHY attached. */ 332 /* Detect if there is a copper PHY attached. */
332 if (hw->phy.type == ixgbe_phy_cu_unknown || 333 switch (hw->phy.type) {
333 hw->phy.type == ixgbe_phy_tn || 334 case ixgbe_phy_cu_unknown:
334 hw->phy.type == ixgbe_phy_aq) { 335 case ixgbe_phy_tn:
336 case ixgbe_phy_aq:
335 media_type = ixgbe_media_type_copper; 337 media_type = ixgbe_media_type_copper;
336 goto out; 338 goto out;
339 default:
340 break;
337 } 341 }
338 342
339 switch (hw->device_id) { 343 switch (hw->device_id) {
@@ -354,6 +358,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
354 case IXGBE_DEV_ID_82599_CX4: 358 case IXGBE_DEV_ID_82599_CX4:
355 media_type = ixgbe_media_type_cx4; 359 media_type = ixgbe_media_type_cx4;
356 break; 360 break;
361 case IXGBE_DEV_ID_82599_T3_LOM:
362 media_type = ixgbe_media_type_copper;
363 break;
357 default: 364 default:
358 media_type = ixgbe_media_type_unknown; 365 media_type = ixgbe_media_type_unknown;
359 break; 366 break;
@@ -411,14 +418,14 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
411 return status; 418 return status;
412} 419}
413 420
414 /** 421/**
415 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 422 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
416 * @hw: pointer to hardware structure 423 * @hw: pointer to hardware structure
417 * 424 *
418 * The base drivers may require better control over SFP+ module 425 * The base drivers may require better control over SFP+ module
419 * PHY states. This includes selectively shutting down the Tx 426 * PHY states. This includes selectively shutting down the Tx
420 * laser on the PHY, effectively halting physical link. 427 * laser on the PHY, effectively halting physical link.
421 **/ 428 **/
422static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 429static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
423{ 430{
424 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 431 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -463,8 +470,6 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
463 **/ 470 **/
464static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 471static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
465{ 472{
466 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
467
468 if (hw->mac.autotry_restart) { 473 if (hw->mac.autotry_restart) {
469 ixgbe_disable_tx_laser_multispeed_fiber(hw); 474 ixgbe_disable_tx_laser_multispeed_fiber(hw);
470 ixgbe_enable_tx_laser_multispeed_fiber(hw); 475 ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -487,17 +492,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
487 bool autoneg_wait_to_complete) 492 bool autoneg_wait_to_complete)
488{ 493{
489 s32 status = 0; 494 s32 status = 0;
490 ixgbe_link_speed phy_link_speed; 495 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
491 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 496 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
492 u32 speedcnt = 0; 497 u32 speedcnt = 0;
493 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 498 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
499 u32 i = 0;
494 bool link_up = false; 500 bool link_up = false;
495 bool negotiation; 501 bool negotiation;
496 int i;
497 502
498 /* Mask off requested but non-supported speeds */ 503 /* Mask off requested but non-supported speeds */
499 hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation); 504 status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
500 speed &= phy_link_speed; 505 &negotiation);
506 if (status != 0)
507 return status;
508
509 speed &= link_speed;
501 510
502 /* 511 /*
503 * Try each speed one by one, highest priority first. We do this in 512 * Try each speed one by one, highest priority first. We do this in
@@ -508,9 +517,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
508 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 517 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
509 518
510 /* If we already have link at this speed, just jump out */ 519 /* If we already have link at this speed, just jump out */
511 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 520 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
521 false);
522 if (status != 0)
523 return status;
512 524
513 if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) 525 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
514 goto out; 526 goto out;
515 527
516 /* Set the module link speed */ 528 /* Set the module link speed */
@@ -522,9 +534,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
522 msleep(40); 534 msleep(40);
523 535
524 status = ixgbe_setup_mac_link_82599(hw, 536 status = ixgbe_setup_mac_link_82599(hw,
525 IXGBE_LINK_SPEED_10GB_FULL, 537 IXGBE_LINK_SPEED_10GB_FULL,
526 autoneg, 538 autoneg,
527 autoneg_wait_to_complete); 539 autoneg_wait_to_complete);
528 if (status != 0) 540 if (status != 0)
529 return status; 541 return status;
530 542
@@ -536,14 +548,16 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
536 * Section 73.10.2, we may have to wait up to 500ms if KR is 548 * Section 73.10.2, we may have to wait up to 500ms if KR is
537 * attempted. 82599 uses the same timing for 10g SFI. 549 * attempted. 82599 uses the same timing for 10g SFI.
538 */ 550 */
539
540 for (i = 0; i < 5; i++) { 551 for (i = 0; i < 5; i++) {
541 /* Wait for the link partner to also set speed */ 552 /* Wait for the link partner to also set speed */
542 msleep(100); 553 msleep(100);
543 554
544 /* If we have link, just jump out */ 555 /* If we have link, just jump out */
545 hw->mac.ops.check_link(hw, &phy_link_speed, 556 status = hw->mac.ops.check_link(hw, &link_speed,
546 &link_up, false); 557 &link_up, false);
558 if (status != 0)
559 return status;
560
547 if (link_up) 561 if (link_up)
548 goto out; 562 goto out;
549 } 563 }
@@ -555,9 +569,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
555 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 569 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
556 570
557 /* If we already have link at this speed, just jump out */ 571 /* If we already have link at this speed, just jump out */
558 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 572 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
573 false);
574 if (status != 0)
575 return status;
559 576
560 if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) 577 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
561 goto out; 578 goto out;
562 579
563 /* Set the module link speed */ 580 /* Set the module link speed */
@@ -570,9 +587,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
570 msleep(40); 587 msleep(40);
571 588
572 status = ixgbe_setup_mac_link_82599(hw, 589 status = ixgbe_setup_mac_link_82599(hw,
573 IXGBE_LINK_SPEED_1GB_FULL, 590 IXGBE_LINK_SPEED_1GB_FULL,
574 autoneg, 591 autoneg,
575 autoneg_wait_to_complete); 592 autoneg_wait_to_complete);
576 if (status != 0) 593 if (status != 0)
577 return status; 594 return status;
578 595
@@ -583,7 +600,11 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
583 msleep(100); 600 msleep(100);
584 601
585 /* If we have link, just jump out */ 602 /* If we have link, just jump out */
586 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 603 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
604 false);
605 if (status != 0)
606 return status;
607
587 if (link_up) 608 if (link_up)
588 goto out; 609 goto out;
589 } 610 }
@@ -626,13 +647,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
626 bool autoneg_wait_to_complete) 647 bool autoneg_wait_to_complete)
627{ 648{
628 s32 status = 0; 649 s32 status = 0;
629 ixgbe_link_speed link_speed; 650 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
630 s32 i, j; 651 s32 i, j;
631 bool link_up = false; 652 bool link_up = false;
632 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 653 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
633 struct ixgbe_adapter *adapter = hw->back;
634
635 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
636 654
637 /* Set autoneg_advertised value based on input link speed */ 655 /* Set autoneg_advertised value based on input link speed */
638 hw->phy.autoneg_advertised = 0; 656 hw->phy.autoneg_advertised = 0;
@@ -658,7 +676,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
658 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { 676 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
659 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 677 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
660 autoneg_wait_to_complete); 678 autoneg_wait_to_complete);
661 if (status) 679 if (status != 0)
662 goto out; 680 goto out;
663 681
664 /* 682 /*
@@ -671,8 +689,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
671 mdelay(100); 689 mdelay(100);
672 690
673 /* If we have link, just jump out */ 691 /* If we have link, just jump out */
674 hw->mac.ops.check_link(hw, &link_speed, 692 status = hw->mac.ops.check_link(hw, &link_speed,
675 &link_up, false); 693 &link_up, false);
694 if (status != 0)
695 goto out;
696
676 if (link_up) 697 if (link_up)
677 goto out; 698 goto out;
678 } 699 }
@@ -690,7 +711,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
690 hw->phy.smart_speed_active = true; 711 hw->phy.smart_speed_active = true;
691 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 712 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
692 autoneg_wait_to_complete); 713 autoneg_wait_to_complete);
693 if (status) 714 if (status != 0)
694 goto out; 715 goto out;
695 716
696 /* 717 /*
@@ -703,8 +724,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
703 mdelay(100); 724 mdelay(100);
704 725
705 /* If we have link, just jump out */ 726 /* If we have link, just jump out */
706 hw->mac.ops.check_link(hw, &link_speed, 727 status = hw->mac.ops.check_link(hw, &link_speed,
707 &link_up, false); 728 &link_up, false);
729 if (status != 0)
730 goto out;
731
708 if (link_up) 732 if (link_up)
709 goto out; 733 goto out;
710 } 734 }
@@ -716,7 +740,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
716 740
717out: 741out:
718 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 742 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
719 e_info(hw, "Smartspeed has downgraded the link speed from " 743 hw_dbg(hw, "Smartspeed has downgraded the link speed from "
720 "the maximum advertised\n"); 744 "the maximum advertised\n");
721 return status; 745 return status;
722} 746}
@@ -748,6 +772,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
748 772
749 /* Check to see if speed passed in is supported. */ 773 /* Check to see if speed passed in is supported. */
750 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); 774 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
775 if (status != 0)
776 goto out;
777
751 speed &= link_capabilities; 778 speed &= link_capabilities;
752 779
753 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 780 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
@@ -761,7 +788,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
761 else 788 else
762 orig_autoc = autoc; 789 orig_autoc = autoc;
763 790
764
765 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 791 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
766 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 792 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
767 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 793 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
@@ -878,7 +904,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
878 904
879 /* PHY ops must be identified and initialized prior to reset */ 905 /* PHY ops must be identified and initialized prior to reset */
880 906
881 /* Init PHY and function pointers, perform SFP setup */ 907 /* Identify PHY and related function pointers */
882 status = hw->phy.ops.init(hw); 908 status = hw->phy.ops.init(hw);
883 909
884 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 910 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -890,6 +916,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
890 hw->phy.sfp_setup_needed = false; 916 hw->phy.sfp_setup_needed = false;
891 } 917 }
892 918
919 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
920 goto reset_hw_out;
921
893 /* Reset PHY */ 922 /* Reset PHY */
894 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 923 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
895 hw->phy.ops.reset(hw); 924 hw->phy.ops.reset(hw);
@@ -898,12 +927,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
898 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 927 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
899 * access and verify no pending requests before reset 928 * access and verify no pending requests before reset
900 */ 929 */
901 status = ixgbe_disable_pcie_master(hw); 930 ixgbe_disable_pcie_master(hw);
902 if (status != 0) {
903 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
904 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
905 }
906 931
932mac_reset_top:
907 /* 933 /*
908 * Issue global reset to the MAC. This needs to be a SW reset. 934 * Issue global reset to the MAC. This needs to be a SW reset.
909 * If link reset is used, it might reset the MAC when mng is using it 935 * If link reset is used, it might reset the MAC when mng is using it
@@ -924,6 +950,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
924 hw_dbg(hw, "Reset polling failed to complete.\n"); 950 hw_dbg(hw, "Reset polling failed to complete.\n");
925 } 951 }
926 952
953 /*
954 * Double resets are required for recovery from certain error
955 * conditions. Between resets, it is necessary to stall to allow time
956 * for any pending HW events to complete. We use 1usec since that is
957 * what is needed for ixgbe_disable_pcie_master(). The second reset
958 * then clears out any effects of those events.
959 */
960 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
961 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
962 udelay(1);
963 goto mac_reset_top;
964 }
965
927 msleep(50); 966 msleep(50);
928 967
929 /* 968 /*
@@ -951,6 +990,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
951 } 990 }
952 } 991 }
953 992
993 /* Store the permanent mac address */
994 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
995
954 /* 996 /*
955 * Store MAC address from RAR0, clear receive address registers, and 997 * Store MAC address from RAR0, clear receive address registers, and
956 * clear the multicast table. Also reset num_rar_entries to 128, 998 * clear the multicast table. Also reset num_rar_entries to 128,
@@ -959,9 +1001,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
959 hw->mac.num_rar_entries = 128; 1001 hw->mac.num_rar_entries = 128;
960 hw->mac.ops.init_rx_addrs(hw); 1002 hw->mac.ops.init_rx_addrs(hw);
961 1003
962 /* Store the permanent mac address */
963 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
964
965 /* Store the permanent SAN mac address */ 1004 /* Store the permanent SAN mac address */
966 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 1005 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
967 1006
@@ -1733,13 +1772,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1733 * @hw: pointer to hardware structure 1772 * @hw: pointer to hardware structure
1734 * 1773 *
1735 * Determines the physical layer module found on the current adapter. 1774 * Determines the physical layer module found on the current adapter.
1775 * If PHY already detected, maintains current PHY type in hw struct,
1776 * otherwise executes the PHY detection routine.
1736 **/ 1777 **/
1737static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1778s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1738{ 1779{
1739 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1780 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1781
1782 /* Detect PHY if not unknown - returns success if already detected. */
1740 status = ixgbe_identify_phy_generic(hw); 1783 status = ixgbe_identify_phy_generic(hw);
1741 if (status != 0) 1784 if (status != 0) {
1742 status = ixgbe_identify_sfp_module_generic(hw); 1785 /* 82599 10GBASE-T requires an external PHY */
1786 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1787 goto out;
1788 else
1789 status = ixgbe_identify_sfp_module_generic(hw);
1790 }
1791
1792 /* Set PHY type none if no PHY detected */
1793 if (hw->phy.type == ixgbe_phy_unknown) {
1794 hw->phy.type = ixgbe_phy_none;
1795 status = 0;
1796 }
1797
1798 /* Return error if SFP module has been detected but is not supported */
1799 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1800 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1801
1802out:
1743 return status; 1803 return status;
1744} 1804}
1745 1805
@@ -1763,11 +1823,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1763 1823
1764 hw->phy.ops.identify(hw); 1824 hw->phy.ops.identify(hw);
1765 1825
1766 if (hw->phy.type == ixgbe_phy_tn || 1826 switch (hw->phy.type) {
1767 hw->phy.type == ixgbe_phy_aq || 1827 case ixgbe_phy_tn:
1768 hw->phy.type == ixgbe_phy_cu_unknown) { 1828 case ixgbe_phy_aq:
1829 case ixgbe_phy_cu_unknown:
1769 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1830 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
1770 &ext_ability); 1831 &ext_ability);
1771 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1832 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
1772 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1833 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1773 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1834 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1775,6 +1836,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1775 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1836 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
1776 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1837 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1777 goto out; 1838 goto out;
1839 default:
1840 break;
1778 } 1841 }
1779 1842
1780 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1843 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1886,6 +1949,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
1886 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 1949 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
1887 break; 1950 break;
1888 else 1951 else
1952 /* Use interrupt-safe sleep just in case */
1889 udelay(10); 1953 udelay(10);
1890 } 1954 }
1891 1955
@@ -1995,7 +2059,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
1995 .set_vmdq = &ixgbe_set_vmdq_generic, 2059 .set_vmdq = &ixgbe_set_vmdq_generic,
1996 .clear_vmdq = &ixgbe_clear_vmdq_generic, 2060 .clear_vmdq = &ixgbe_clear_vmdq_generic,
1997 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 2061 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1998 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
1999 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 2062 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
2000 .enable_mc = &ixgbe_enable_mc_generic, 2063 .enable_mc = &ixgbe_enable_mc_generic,
2001 .disable_mc = &ixgbe_disable_mc_generic, 2064 .disable_mc = &ixgbe_disable_mc_generic,
@@ -2006,31 +2069,34 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2006 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2069 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
2007 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 2070 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
2008 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2071 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2072 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2073 .release_swfw_sync = &ixgbe_release_swfw_sync,
2074
2009}; 2075};
2010 2076
2011static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2077static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2012 .init_params = &ixgbe_init_eeprom_params_generic, 2078 .init_params = &ixgbe_init_eeprom_params_generic,
2013 .read = &ixgbe_read_eerd_generic, 2079 .read = &ixgbe_read_eerd_generic,
2014 .write = &ixgbe_write_eeprom_generic, 2080 .write = &ixgbe_write_eeprom_generic,
2015 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, 2081 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2016 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2082 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2017 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2083 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2018}; 2084};
2019 2085
2020static struct ixgbe_phy_operations phy_ops_82599 = { 2086static struct ixgbe_phy_operations phy_ops_82599 = {
2021 .identify = &ixgbe_identify_phy_82599, 2087 .identify = &ixgbe_identify_phy_82599,
2022 .identify_sfp = &ixgbe_identify_sfp_module_generic, 2088 .identify_sfp = &ixgbe_identify_sfp_module_generic,
2023 .init = &ixgbe_init_phy_ops_82599, 2089 .init = &ixgbe_init_phy_ops_82599,
2024 .reset = &ixgbe_reset_phy_generic, 2090 .reset = &ixgbe_reset_phy_generic,
2025 .read_reg = &ixgbe_read_phy_reg_generic, 2091 .read_reg = &ixgbe_read_phy_reg_generic,
2026 .write_reg = &ixgbe_write_phy_reg_generic, 2092 .write_reg = &ixgbe_write_phy_reg_generic,
2027 .setup_link = &ixgbe_setup_phy_link_generic, 2093 .setup_link = &ixgbe_setup_phy_link_generic,
2028 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 2094 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
2029 .read_i2c_byte = &ixgbe_read_i2c_byte_generic, 2095 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
2030 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2096 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
2031 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2097 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
2032 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2098 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
2033 .check_overtemp = &ixgbe_tn_check_overtemp, 2099 .check_overtemp = &ixgbe_tn_check_overtemp,
2034}; 2100};
2035 2101
2036struct ixgbe_info ixgbe_82599_info = { 2102struct ixgbe_info ixgbe_82599_info = {
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index ebbda7d15254..bcd952916eb2 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -46,10 +46,13 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48 48
49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 50static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
51static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
52static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
53static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
53static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
54 57
55/** 58/**
@@ -139,17 +142,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
139 IXGBE_READ_REG(hw, IXGBE_MRFC); 142 IXGBE_READ_REG(hw, IXGBE_MRFC);
140 IXGBE_READ_REG(hw, IXGBE_RLEC); 143 IXGBE_READ_REG(hw, IXGBE_RLEC);
141 IXGBE_READ_REG(hw, IXGBE_LXONTXC); 144 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
142 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
143 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 145 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
144 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 146 if (hw->mac.type >= ixgbe_mac_82599EB) {
147 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
148 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
149 } else {
150 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
151 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
152 }
145 153
146 for (i = 0; i < 8; i++) { 154 for (i = 0; i < 8; i++) {
147 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 155 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
148 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
149 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 156 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
150 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 157 if (hw->mac.type >= ixgbe_mac_82599EB) {
158 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
159 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
160 } else {
161 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
162 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
163 }
151 } 164 }
152 165 if (hw->mac.type >= ixgbe_mac_82599EB)
166 for (i = 0; i < 8; i++)
167 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
153 IXGBE_READ_REG(hw, IXGBE_PRC64); 168 IXGBE_READ_REG(hw, IXGBE_PRC64);
154 IXGBE_READ_REG(hw, IXGBE_PRC127); 169 IXGBE_READ_REG(hw, IXGBE_PRC127);
155 IXGBE_READ_REG(hw, IXGBE_PRC255); 170 IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -187,9 +202,26 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
187 IXGBE_READ_REG(hw, IXGBE_BPTC); 202 IXGBE_READ_REG(hw, IXGBE_BPTC);
188 for (i = 0; i < 16; i++) { 203 for (i = 0; i < 16; i++) {
189 IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 204 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
190 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
191 IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 205 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
192 IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 206 if (hw->mac.type >= ixgbe_mac_82599EB) {
207 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
208 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
209 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
210 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
211 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
212 } else {
213 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
214 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
215 }
216 }
217
218 if (hw->mac.type == ixgbe_mac_X540) {
219 if (hw->phy.id == 0)
220 hw->phy.ops.identify(hw);
221 hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
222 hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
223 hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
224 hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
193 } 225 }
194 226
195 return 0; 227 return 0;
@@ -454,8 +486,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
454 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 486 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
455 * access and verify no pending requests 487 * access and verify no pending requests
456 */ 488 */
457 if (ixgbe_disable_pcie_master(hw) != 0) 489 ixgbe_disable_pcie_master(hw);
458 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
459 490
460 return 0; 491 return 0;
461} 492}
@@ -603,7 +634,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
603 ixgbe_shift_out_eeprom_bits(hw, data, 16); 634 ixgbe_shift_out_eeprom_bits(hw, data, 16);
604 ixgbe_standby_eeprom(hw); 635 ixgbe_standby_eeprom(hw);
605 636
606 msleep(hw->eeprom.semaphore_delay);
607 /* Done with writing - release the EEPROM */ 637 /* Done with writing - release the EEPROM */
608 ixgbe_release_eeprom(hw); 638 ixgbe_release_eeprom(hw);
609 } 639 }
@@ -747,10 +777,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
747static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 777static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
748{ 778{
749 s32 status = 0; 779 s32 status = 0;
750 u32 eec = 0; 780 u32 eec;
751 u32 i; 781 u32 i;
752 782
753 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) 783 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
754 status = IXGBE_ERR_SWFW_SYNC; 784 status = IXGBE_ERR_SWFW_SYNC;
755 785
756 if (status == 0) { 786 if (status == 0) {
@@ -773,18 +803,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
773 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 803 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
774 hw_dbg(hw, "Could not acquire EEPROM grant\n"); 804 hw_dbg(hw, "Could not acquire EEPROM grant\n");
775 805
776 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 806 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
777 status = IXGBE_ERR_EEPROM; 807 status = IXGBE_ERR_EEPROM;
778 } 808 }
779 }
780 809
781 /* Setup EEPROM for Read/Write */ 810 /* Setup EEPROM for Read/Write */
782 if (status == 0) { 811 if (status == 0) {
783 /* Clear CS and SK */ 812 /* Clear CS and SK */
784 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 813 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
785 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 814 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
786 IXGBE_WRITE_FLUSH(hw); 815 IXGBE_WRITE_FLUSH(hw);
787 udelay(1); 816 udelay(1);
817 }
788 } 818 }
789 return status; 819 return status;
790} 820}
@@ -798,13 +828,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
798static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 828static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
799{ 829{
800 s32 status = IXGBE_ERR_EEPROM; 830 s32 status = IXGBE_ERR_EEPROM;
801 u32 timeout; 831 u32 timeout = 2000;
802 u32 i; 832 u32 i;
803 u32 swsm; 833 u32 swsm;
804 834
805 /* Set timeout value based on size of EEPROM */
806 timeout = hw->eeprom.word_size + 1;
807
808 /* Get SMBI software semaphore between device drivers first */ 835 /* Get SMBI software semaphore between device drivers first */
809 for (i = 0; i < timeout; i++) { 836 for (i = 0; i < timeout; i++) {
810 /* 837 /*
@@ -816,7 +843,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
816 status = 0; 843 status = 0;
817 break; 844 break;
818 } 845 }
819 msleep(1); 846 udelay(50);
820 } 847 }
821 848
822 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 849 /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -844,11 +871,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
844 * was not granted because we don't have access to the EEPROM 871 * was not granted because we don't have access to the EEPROM
845 */ 872 */
846 if (i >= timeout) { 873 if (i >= timeout) {
847 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 874 hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
848 "not granted.\n"); 875 "not granted.\n");
849 ixgbe_release_eeprom_semaphore(hw); 876 ixgbe_release_eeprom_semaphore(hw);
850 status = IXGBE_ERR_EEPROM; 877 status = IXGBE_ERR_EEPROM;
851 } 878 }
879 } else {
880 hw_dbg(hw, "Software semaphore SMBI between device drivers "
881 "not granted.\n");
852 } 882 }
853 883
854 return status; 884 return status;
@@ -1080,11 +1110,14 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1080 eec &= ~IXGBE_EEC_REQ; 1110 eec &= ~IXGBE_EEC_REQ;
1081 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1111 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1082 1112
1083 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1113 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1114
1115 /* Delay before attempt to obtain semaphore again to allow FW access */
1116 msleep(hw->eeprom.semaphore_delay);
1084} 1117}
1085 1118
1086/** 1119/**
1087 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 1120 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1088 * @hw: pointer to hardware structure 1121 * @hw: pointer to hardware structure
1089 **/ 1122 **/
1090u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) 1123u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
@@ -1190,7 +1223,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1190 if (status == 0) { 1223 if (status == 0) {
1191 checksum = hw->eeprom.ops.calc_checksum(hw); 1224 checksum = hw->eeprom.ops.calc_checksum(hw);
1192 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1225 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1193 checksum); 1226 checksum);
1194 } else { 1227 } else {
1195 hw_dbg(hw, "EEPROM read failed\n"); 1228 hw_dbg(hw, "EEPROM read failed\n");
1196 } 1229 }
@@ -1238,37 +1271,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1238 u32 rar_low, rar_high; 1271 u32 rar_low, rar_high;
1239 u32 rar_entries = hw->mac.num_rar_entries; 1272 u32 rar_entries = hw->mac.num_rar_entries;
1240 1273
1274 /* Make sure we are using a valid rar index range */
1275 if (index >= rar_entries) {
1276 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1277 return IXGBE_ERR_INVALID_ARGUMENT;
1278 }
1279
1241 /* setup VMDq pool selection before this RAR gets enabled */ 1280 /* setup VMDq pool selection before this RAR gets enabled */
1242 hw->mac.ops.set_vmdq(hw, index, vmdq); 1281 hw->mac.ops.set_vmdq(hw, index, vmdq);
1243 1282
1244 /* Make sure we are using a valid rar index range */ 1283 /*
1245 if (index < rar_entries) { 1284 * HW expects these in little endian so we reverse the byte
1246 /* 1285 * order from network order (big endian) to little endian
1247 * HW expects these in little endian so we reverse the byte 1286 */
1248 * order from network order (big endian) to little endian 1287 rar_low = ((u32)addr[0] |
1249 */ 1288 ((u32)addr[1] << 8) |
1250 rar_low = ((u32)addr[0] | 1289 ((u32)addr[2] << 16) |
1251 ((u32)addr[1] << 8) | 1290 ((u32)addr[3] << 24));
1252 ((u32)addr[2] << 16) | 1291 /*
1253 ((u32)addr[3] << 24)); 1292 * Some parts put the VMDq setting in the extra RAH bits,
1254 /* 1293 * so save everything except the lower 16 bits that hold part
1255 * Some parts put the VMDq setting in the extra RAH bits, 1294 * of the address and the address valid bit.
1256 * so save everything except the lower 16 bits that hold part 1295 */
1257 * of the address and the address valid bit. 1296 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1258 */ 1297 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1259 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1298 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1260 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1261 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1262 1299
1263 if (enable_addr != 0) 1300 if (enable_addr != 0)
1264 rar_high |= IXGBE_RAH_AV; 1301 rar_high |= IXGBE_RAH_AV;
1265 1302
1266 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1303 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1267 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1304 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1268 } else {
1269 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1270 return IXGBE_ERR_RAR_INDEX;
1271 }
1272 1305
1273 return 0; 1306 return 0;
1274} 1307}
@@ -1286,58 +1319,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1286 u32 rar_entries = hw->mac.num_rar_entries; 1319 u32 rar_entries = hw->mac.num_rar_entries;
1287 1320
1288 /* Make sure we are using a valid rar index range */ 1321 /* Make sure we are using a valid rar index range */
1289 if (index < rar_entries) { 1322 if (index >= rar_entries) {
1290 /*
1291 * Some parts put the VMDq setting in the extra RAH bits,
1292 * so save everything except the lower 16 bits that hold part
1293 * of the address and the address valid bit.
1294 */
1295 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1296 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1297
1298 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1299 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1300 } else {
1301 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1323 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1302 return IXGBE_ERR_RAR_INDEX; 1324 return IXGBE_ERR_INVALID_ARGUMENT;
1303 } 1325 }
1304 1326
1305 /* clear VMDq pool/queue selection for this RAR */ 1327 /*
1306 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1328 * Some parts put the VMDq setting in the extra RAH bits,
1307 1329 * so save everything except the lower 16 bits that hold part
1308 return 0; 1330 * of the address and the address valid bit.
1309} 1331 */
1310
1311/**
1312 * ixgbe_enable_rar - Enable Rx address register
1313 * @hw: pointer to hardware structure
1314 * @index: index into the RAR table
1315 *
1316 * Enables the select receive address register.
1317 **/
1318static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1319{
1320 u32 rar_high;
1321
1322 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1332 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1323 rar_high |= IXGBE_RAH_AV; 1333 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1334
1335 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1324 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1336 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1325}
1326 1337
1327/** 1338 /* clear VMDq pool/queue selection for this RAR */
1328 * ixgbe_disable_rar - Disable Rx address register 1339 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1329 * @hw: pointer to hardware structure
1330 * @index: index into the RAR table
1331 *
1332 * Disables the select receive address register.
1333 **/
1334static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1335{
1336 u32 rar_high;
1337 1340
1338 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1341 return 0;
1339 rar_high &= (~IXGBE_RAH_AV);
1340 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1341} 1342}
1342 1343
1343/** 1344/**
@@ -1386,7 +1387,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1386 } 1387 }
1387 1388
1388 /* Clear the MTA */ 1389 /* Clear the MTA */
1389 hw->addr_ctrl.mc_addr_in_rar_count = 0;
1390 hw->addr_ctrl.mta_in_use = 0; 1390 hw->addr_ctrl.mta_in_use = 0;
1391 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1391 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1392 1392
@@ -1401,105 +1401,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1401} 1401}
1402 1402
1403/** 1403/**
1404 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1405 * @hw: pointer to hardware structure
1406 * @addr: new address
1407 *
1408 * Adds it to unused receive address register or goes into promiscuous mode.
1409 **/
1410static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1411{
1412 u32 rar_entries = hw->mac.num_rar_entries;
1413 u32 rar;
1414
1415 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1416 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1417
1418 /*
1419 * Place this address in the RAR if there is room,
1420 * else put the controller into promiscuous mode
1421 */
1422 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1423 rar = hw->addr_ctrl.rar_used_count -
1424 hw->addr_ctrl.mc_addr_in_rar_count;
1425 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1426 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1427 hw->addr_ctrl.rar_used_count++;
1428 } else {
1429 hw->addr_ctrl.overflow_promisc++;
1430 }
1431
1432 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1433}
1434
1435/**
1436 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1437 * @hw: pointer to hardware structure
1438 * @netdev: pointer to net device structure
1439 *
1440 * The given list replaces any existing list. Clears the secondary addrs from
1441 * receive address registers. Uses unused receive address registers for the
1442 * first secondary addresses, and falls back to promiscuous mode as needed.
1443 *
1444 * Drivers using secondary unicast addresses must set user_set_promisc when
1445 * manually putting the device into promiscuous mode.
1446 **/
1447s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1448 struct net_device *netdev)
1449{
1450 u32 i;
1451 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1452 u32 uc_addr_in_use;
1453 u32 fctrl;
1454 struct netdev_hw_addr *ha;
1455
1456 /*
1457 * Clear accounting of old secondary address list,
1458 * don't count RAR[0]
1459 */
1460 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1461 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1462 hw->addr_ctrl.overflow_promisc = 0;
1463
1464 /* Zero out the other receive addresses */
1465 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
1466 for (i = 0; i < uc_addr_in_use; i++) {
1467 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1468 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1469 }
1470
1471 /* Add the new addresses */
1472 netdev_for_each_uc_addr(ha, netdev) {
1473 hw_dbg(hw, " Adding the secondary addresses:\n");
1474 ixgbe_add_uc_addr(hw, ha->addr, 0);
1475 }
1476
1477 if (hw->addr_ctrl.overflow_promisc) {
1478 /* enable promisc if not already in overflow or set by user */
1479 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1480 hw_dbg(hw, " Entering address overflow promisc mode\n");
1481 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1482 fctrl |= IXGBE_FCTRL_UPE;
1483 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1484 hw->addr_ctrl.uc_set_promisc = true;
1485 }
1486 } else {
1487 /* only disable if set by overflow, not by user */
1488 if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
1489 !(hw->addr_ctrl.user_set_promisc)) {
1490 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1491 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1492 fctrl &= ~IXGBE_FCTRL_UPE;
1493 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1494 hw->addr_ctrl.uc_set_promisc = false;
1495 }
1496 }
1497
1498 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
1499 return 0;
1500}
1501
1502/**
1503 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 1404 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1504 * @hw: pointer to hardware structure 1405 * @hw: pointer to hardware structure
1505 * @mc_addr: the multicast address 1406 * @mc_addr: the multicast address
@@ -1550,7 +1451,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1550 u32 vector; 1451 u32 vector;
1551 u32 vector_bit; 1452 u32 vector_bit;
1552 u32 vector_reg; 1453 u32 vector_reg;
1553 u32 mta_reg;
1554 1454
1555 hw->addr_ctrl.mta_in_use++; 1455 hw->addr_ctrl.mta_in_use++;
1556 1456
@@ -1568,9 +1468,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1568 */ 1468 */
1569 vector_reg = (vector >> 5) & 0x7F; 1469 vector_reg = (vector >> 5) & 0x7F;
1570 vector_bit = vector & 0x1F; 1470 vector_bit = vector & 0x1F;
1571 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); 1471 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1572 mta_reg |= (1 << vector_bit);
1573 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
1574} 1472}
1575 1473
1576/** 1474/**
@@ -1596,18 +1494,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1596 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); 1494 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1597 hw->addr_ctrl.mta_in_use = 0; 1495 hw->addr_ctrl.mta_in_use = 0;
1598 1496
1599 /* Clear the MTA */ 1497 /* Clear mta_shadow */
1600 hw_dbg(hw, " Clearing MTA\n"); 1498 hw_dbg(hw, " Clearing MTA\n");
1601 for (i = 0; i < hw->mac.mcft_size; i++) 1499 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
1602 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1603 1500
1604 /* Add the new addresses */ 1501 /* Update mta shadow */
1605 netdev_for_each_mc_addr(ha, netdev) { 1502 netdev_for_each_mc_addr(ha, netdev) {
1606 hw_dbg(hw, " Adding the multicast addresses:\n"); 1503 hw_dbg(hw, " Adding the multicast addresses:\n");
1607 ixgbe_set_mta(hw, ha->addr); 1504 ixgbe_set_mta(hw, ha->addr);
1608 } 1505 }
1609 1506
1610 /* Enable mta */ 1507 /* Enable mta */
1508 for (i = 0; i < hw->mac.mcft_size; i++)
1509 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
1510 hw->mac.mta_shadow[i]);
1511
1611 if (hw->addr_ctrl.mta_in_use > 0) 1512 if (hw->addr_ctrl.mta_in_use > 0)
1612 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1513 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1613 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1514 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1624,15 +1525,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1624 **/ 1525 **/
1625s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 1526s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1626{ 1527{
1627 u32 i;
1628 u32 rar_entries = hw->mac.num_rar_entries;
1629 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1528 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1630 1529
1631 if (a->mc_addr_in_rar_count > 0)
1632 for (i = (rar_entries - a->mc_addr_in_rar_count);
1633 i < rar_entries; i++)
1634 ixgbe_enable_rar(hw, i);
1635
1636 if (a->mta_in_use > 0) 1530 if (a->mta_in_use > 0)
1637 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 1531 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1638 hw->mac.mc_filter_type); 1532 hw->mac.mc_filter_type);
@@ -1648,15 +1542,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1648 **/ 1542 **/
1649s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 1543s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1650{ 1544{
1651 u32 i;
1652 u32 rar_entries = hw->mac.num_rar_entries;
1653 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1545 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1654 1546
1655 if (a->mc_addr_in_rar_count > 0)
1656 for (i = (rar_entries - a->mc_addr_in_rar_count);
1657 i < rar_entries; i++)
1658 ixgbe_disable_rar(hw, i);
1659
1660 if (a->mta_in_use > 0) 1547 if (a->mta_in_use > 0)
1661 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1548 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1662 1549
@@ -1685,7 +1572,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1685#endif /* CONFIG_DCB */ 1572#endif /* CONFIG_DCB */
1686 /* Negotiate the fc mode to use */ 1573 /* Negotiate the fc mode to use */
1687 ret_val = ixgbe_fc_autoneg(hw); 1574 ret_val = ixgbe_fc_autoneg(hw);
1688 if (ret_val) 1575 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1689 goto out; 1576 goto out;
1690 1577
1691 /* Disable any previous flow control settings */ 1578 /* Disable any previous flow control settings */
@@ -1703,7 +1590,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1703 * 2: Tx flow control is enabled (we can send pause frames but 1590 * 2: Tx flow control is enabled (we can send pause frames but
1704 * we do not support receiving pause frames). 1591 * we do not support receiving pause frames).
1705 * 3: Both Rx and Tx flow control (symmetric) are enabled. 1592 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1593#ifdef CONFIG_DCB
1706 * 4: Priority Flow Control is enabled. 1594 * 4: Priority Flow Control is enabled.
1595#endif
1707 * other: Invalid. 1596 * other: Invalid.
1708 */ 1597 */
1709 switch (hw->fc.current_mode) { 1598 switch (hw->fc.current_mode) {
@@ -1791,12 +1680,13 @@ out:
1791 **/ 1680 **/
1792s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 1681s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1793{ 1682{
1794 s32 ret_val = 0; 1683 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1795 ixgbe_link_speed speed; 1684 ixgbe_link_speed speed;
1796 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1797 u32 links2, anlp1_reg, autoc_reg, links;
1798 bool link_up; 1685 bool link_up;
1799 1686
1687 if (hw->fc.disable_fc_autoneg)
1688 goto out;
1689
1800 /* 1690 /*
1801 * AN should have completed when the cable was plugged in. 1691 * AN should have completed when the cable was plugged in.
1802 * Look for reasons to bail out. Bail out if: 1692 * Look for reasons to bail out. Bail out if:
@@ -1807,153 +1697,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1807 * So use link_up_wait_to_complete=false. 1697 * So use link_up_wait_to_complete=false.
1808 */ 1698 */
1809 hw->mac.ops.check_link(hw, &speed, &link_up, false); 1699 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1810 1700 if (!link_up) {
1811 if (hw->fc.disable_fc_autoneg || (!link_up)) { 1701 ret_val = IXGBE_ERR_FLOW_CONTROL;
1812 hw->fc.fc_was_autonegged = false;
1813 hw->fc.current_mode = hw->fc.requested_mode;
1814 goto out; 1702 goto out;
1815 } 1703 }
1816 1704
1817 /* 1705 switch (hw->phy.media_type) {
1818 * On backplane, bail out if 1706 /* Autoneg flow control on fiber adapters */
1819 * - backplane autoneg was not completed, or if 1707 case ixgbe_media_type_fiber:
1820 * - we are 82599 and link partner is not AN enabled 1708 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
1821 */ 1709 ret_val = ixgbe_fc_autoneg_fiber(hw);
1822 if (hw->phy.media_type == ixgbe_media_type_backplane) { 1710 break;
1823 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1824 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1825 hw->fc.fc_was_autonegged = false;
1826 hw->fc.current_mode = hw->fc.requested_mode;
1827 goto out;
1828 }
1829 1711
1830 if (hw->mac.type == ixgbe_mac_82599EB) { 1712 /* Autoneg flow control on backplane adapters */
1831 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 1713 case ixgbe_media_type_backplane:
1832 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 1714 ret_val = ixgbe_fc_autoneg_backplane(hw);
1833 hw->fc.fc_was_autonegged = false; 1715 break;
1834 hw->fc.current_mode = hw->fc.requested_mode; 1716
1835 goto out; 1717 /* Autoneg flow control on copper adapters */
1836 } 1718 case ixgbe_media_type_copper:
1837 } 1719 if (ixgbe_device_supports_autoneg_fc(hw) == 0)
1720 ret_val = ixgbe_fc_autoneg_copper(hw);
1721 break;
1722
1723 default:
1724 break;
1838 } 1725 }
1839 1726
1727out:
1728 if (ret_val == 0) {
1729 hw->fc.fc_was_autonegged = true;
1730 } else {
1731 hw->fc.fc_was_autonegged = false;
1732 hw->fc.current_mode = hw->fc.requested_mode;
1733 }
1734 return ret_val;
1735}
1736
1737/**
1738 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
1739 * @hw: pointer to hardware structure
1740 *
1741 * Enable flow control according on 1 gig fiber.
1742 **/
1743static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
1744{
1745 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1746 s32 ret_val;
1747
1840 /* 1748 /*
1841 * On multispeed fiber at 1g, bail out if 1749 * On multispeed fiber at 1g, bail out if
1842 * - link is up but AN did not complete, or if 1750 * - link is up but AN did not complete, or if
1843 * - link is up and AN completed but timed out 1751 * - link is up and AN completed but timed out
1844 */ 1752 */
1845 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) { 1753
1846 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 1754 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1847 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 1755 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1848 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 1756 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1849 hw->fc.fc_was_autonegged = false; 1757 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1850 hw->fc.current_mode = hw->fc.requested_mode; 1758 goto out;
1851 goto out;
1852 }
1853 } 1759 }
1854 1760
1761 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1762 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1763
1764 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
1765 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
1766 IXGBE_PCS1GANA_ASM_PAUSE,
1767 IXGBE_PCS1GANA_SYM_PAUSE,
1768 IXGBE_PCS1GANA_ASM_PAUSE);
1769
1770out:
1771 return ret_val;
1772}
1773
1774/**
1775 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
1776 * @hw: pointer to hardware structure
1777 *
1778 * Enable flow control according to IEEE clause 37.
1779 **/
1780static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
1781{
1782 u32 links2, anlp1_reg, autoc_reg, links;
1783 s32 ret_val;
1784
1855 /* 1785 /*
1856 * Bail out on 1786 * On backplane, bail out if
1857 * - copper or CX4 adapters 1787 * - backplane autoneg was not completed, or if
1858 * - fiber adapters running at 10gig 1788 * - we are 82599 and link partner is not AN enabled
1859 */ 1789 */
1860 if ((hw->phy.media_type == ixgbe_media_type_copper) || 1790 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1861 (hw->phy.media_type == ixgbe_media_type_cx4) || 1791 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1862 ((hw->phy.media_type == ixgbe_media_type_fiber) &&
1863 (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
1864 hw->fc.fc_was_autonegged = false; 1792 hw->fc.fc_was_autonegged = false;
1865 hw->fc.current_mode = hw->fc.requested_mode; 1793 hw->fc.current_mode = hw->fc.requested_mode;
1794 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1866 goto out; 1795 goto out;
1867 } 1796 }
1868 1797
1798 if (hw->mac.type == ixgbe_mac_82599EB) {
1799 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1800 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
1801 hw->fc.fc_was_autonegged = false;
1802 hw->fc.current_mode = hw->fc.requested_mode;
1803 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1804 goto out;
1805 }
1806 }
1869 /* 1807 /*
1870 * Read the AN advertisement and LP ability registers and resolve 1808 * Read the 10g AN autoc and LP ability registers and resolve
1871 * local flow control settings accordingly 1809 * local flow control settings accordingly
1872 */ 1810 */
1873 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 1811 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1874 (hw->phy.media_type != ixgbe_media_type_backplane)) { 1812 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1875 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1876 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1877 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1878 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1879 /*
1880 * Now we need to check if the user selected Rx ONLY
1881 * of pause frames. In this case, we had to advertise
1882 * FULL flow control because we could not advertise RX
1883 * ONLY. Hence, we must now check to see if we need to
1884 * turn OFF the TRANSMISSION of PAUSE frames.
1885 */
1886 if (hw->fc.requested_mode == ixgbe_fc_full) {
1887 hw->fc.current_mode = ixgbe_fc_full;
1888 hw_dbg(hw, "Flow Control = FULL.\n");
1889 } else {
1890 hw->fc.current_mode = ixgbe_fc_rx_pause;
1891 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1892 }
1893 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1894 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1895 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1896 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1897 hw->fc.current_mode = ixgbe_fc_tx_pause;
1898 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1899 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1900 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1901 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1902 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1903 hw->fc.current_mode = ixgbe_fc_rx_pause;
1904 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1905 } else {
1906 hw->fc.current_mode = ixgbe_fc_none;
1907 hw_dbg(hw, "Flow Control = NONE.\n");
1908 }
1909 }
1910 1813
1911 if (hw->phy.media_type == ixgbe_media_type_backplane) { 1814 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
1815 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
1816 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
1817
1818out:
1819 return ret_val;
1820}
1821
1822/**
1823 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
1824 * @hw: pointer to hardware structure
1825 *
1826 * Enable flow control according to IEEE clause 37.
1827 **/
1828static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
1829{
1830 u16 technology_ability_reg = 0;
1831 u16 lp_technology_ability_reg = 0;
1832
1833 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
1834 MDIO_MMD_AN,
1835 &technology_ability_reg);
1836 hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
1837 MDIO_MMD_AN,
1838 &lp_technology_ability_reg);
1839
1840 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
1841 (u32)lp_technology_ability_reg,
1842 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
1843 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
1844}
1845
1846/**
1847 * ixgbe_negotiate_fc - Negotiate flow control
1848 * @hw: pointer to hardware structure
1849 * @adv_reg: flow control advertised settings
1850 * @lp_reg: link partner's flow control settings
1851 * @adv_sym: symmetric pause bit in advertisement
1852 * @adv_asm: asymmetric pause bit in advertisement
1853 * @lp_sym: symmetric pause bit in link partner advertisement
1854 * @lp_asm: asymmetric pause bit in link partner advertisement
1855 *
1856 * Find the intersection between advertised settings and link partner's
1857 * advertised settings
1858 **/
1859static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1860 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
1861{
1862 if ((!(adv_reg)) || (!(lp_reg)))
1863 return IXGBE_ERR_FC_NOT_NEGOTIATED;
1864
1865 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
1912 /* 1866 /*
1913 * Read the 10g AN autoc and LP ability registers and resolve 1867 * Now we need to check if the user selected Rx ONLY
1914 * local flow control settings accordingly 1868 * of pause frames. In this case, we had to advertise
1869 * FULL flow control because we could not advertise RX
1870 * ONLY. Hence, we must now check to see if we need to
1871 * turn OFF the TRANSMISSION of PAUSE frames.
1915 */ 1872 */
1916 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1873 if (hw->fc.requested_mode == ixgbe_fc_full) {
1917 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 1874 hw->fc.current_mode = ixgbe_fc_full;
1918 1875 hw_dbg(hw, "Flow Control = FULL.\n");
1919 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1920 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1921 /*
1922 * Now we need to check if the user selected Rx ONLY
1923 * of pause frames. In this case, we had to advertise
1924 * FULL flow control because we could not advertise RX
1925 * ONLY. Hence, we must now check to see if we need to
1926 * turn OFF the TRANSMISSION of PAUSE frames.
1927 */
1928 if (hw->fc.requested_mode == ixgbe_fc_full) {
1929 hw->fc.current_mode = ixgbe_fc_full;
1930 hw_dbg(hw, "Flow Control = FULL.\n");
1931 } else {
1932 hw->fc.current_mode = ixgbe_fc_rx_pause;
1933 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1934 }
1935 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1936 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1937 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1938 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1939 hw->fc.current_mode = ixgbe_fc_tx_pause;
1940 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1941 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1942 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1943 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1944 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1945 hw->fc.current_mode = ixgbe_fc_rx_pause;
1946 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1947 } else { 1876 } else {
1948 hw->fc.current_mode = ixgbe_fc_none; 1877 hw->fc.current_mode = ixgbe_fc_rx_pause;
1949 hw_dbg(hw, "Flow Control = NONE.\n"); 1878 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
1950 } 1879 }
1880 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
1881 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
1882 hw->fc.current_mode = ixgbe_fc_tx_pause;
1883 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1884 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
1885 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
1886 hw->fc.current_mode = ixgbe_fc_rx_pause;
1887 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1888 } else {
1889 hw->fc.current_mode = ixgbe_fc_none;
1890 hw_dbg(hw, "Flow Control = NONE.\n");
1951 } 1891 }
1952 /* Record that current_mode is the result of a successful autoneg */ 1892 return 0;
1953 hw->fc.fc_was_autonegged = true;
1954
1955out:
1956 return ret_val;
1957} 1893}
1958 1894
1959/** 1895/**
@@ -1965,7 +1901,8 @@ out:
1965static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 1901static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1966{ 1902{
1967 s32 ret_val = 0; 1903 s32 ret_val = 0;
1968 u32 reg; 1904 u32 reg = 0, reg_bp = 0;
1905 u16 reg_cu = 0;
1969 1906
1970#ifdef CONFIG_DCB 1907#ifdef CONFIG_DCB
1971 if (hw->fc.requested_mode == ixgbe_fc_pfc) { 1908 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1973,7 +1910,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1973 goto out; 1910 goto out;
1974 } 1911 }
1975 1912
1976#endif 1913#endif /* CONFIG_DCB */
1977 /* Validate the packetbuf configuration */ 1914 /* Validate the packetbuf configuration */
1978 if (packetbuf_num < 0 || packetbuf_num > 7) { 1915 if (packetbuf_num < 0 || packetbuf_num > 7) {
1979 hw_dbg(hw, "Invalid packet buffer number [%d], expected range " 1916 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
@@ -2011,11 +1948,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2011 hw->fc.requested_mode = ixgbe_fc_full; 1948 hw->fc.requested_mode = ixgbe_fc_full;
2012 1949
2013 /* 1950 /*
2014 * Set up the 1G flow control advertisement registers so the HW will be 1951 * Set up the 1G and 10G flow control advertisement registers so the
2015 * able to do fc autoneg once the cable is plugged in. If we end up 1952 * HW will be able to do fc autoneg once the cable is plugged in. If
2016 * using 10g instead, this is harmless. 1953 * we link at 10G, the 1G advertisement is harmless and vice versa.
2017 */ 1954 */
2018 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 1955
1956 switch (hw->phy.media_type) {
1957 case ixgbe_media_type_fiber:
1958 case ixgbe_media_type_backplane:
1959 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1960 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1961 break;
1962
1963 case ixgbe_media_type_copper:
1964 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
1965 MDIO_MMD_AN, &reg_cu);
1966 break;
1967
1968 default:
1969 ;
1970 }
2019 1971
2020 /* 1972 /*
2021 * The possible values of fc.requested_mode are: 1973 * The possible values of fc.requested_mode are:
@@ -2034,6 +1986,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2034 case ixgbe_fc_none: 1986 case ixgbe_fc_none:
2035 /* Flow control completely disabled by software override. */ 1987 /* Flow control completely disabled by software override. */
2036 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 1988 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1989 if (hw->phy.media_type == ixgbe_media_type_backplane)
1990 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
1991 IXGBE_AUTOC_ASM_PAUSE);
1992 else if (hw->phy.media_type == ixgbe_media_type_copper)
1993 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2037 break; 1994 break;
2038 case ixgbe_fc_rx_pause: 1995 case ixgbe_fc_rx_pause:
2039 /* 1996 /*
@@ -2045,6 +2002,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2045 * disable the adapter's ability to send PAUSE frames. 2002 * disable the adapter's ability to send PAUSE frames.
2046 */ 2003 */
2047 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2004 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2005 if (hw->phy.media_type == ixgbe_media_type_backplane)
2006 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2007 IXGBE_AUTOC_ASM_PAUSE);
2008 else if (hw->phy.media_type == ixgbe_media_type_copper)
2009 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2048 break; 2010 break;
2049 case ixgbe_fc_tx_pause: 2011 case ixgbe_fc_tx_pause:
2050 /* 2012 /*
@@ -2053,10 +2015,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2053 */ 2015 */
2054 reg |= (IXGBE_PCS1GANA_ASM_PAUSE); 2016 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2055 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); 2017 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2018 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2019 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2020 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2021 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
2022 reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2023 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2024 }
2056 break; 2025 break;
2057 case ixgbe_fc_full: 2026 case ixgbe_fc_full:
2058 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2027 /* Flow control (both Rx and Tx) is enabled by SW override. */
2059 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2028 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2029 if (hw->phy.media_type == ixgbe_media_type_backplane)
2030 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2031 IXGBE_AUTOC_ASM_PAUSE);
2032 else if (hw->phy.media_type == ixgbe_media_type_copper)
2033 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2060 break; 2034 break;
2061#ifdef CONFIG_DCB 2035#ifdef CONFIG_DCB
2062 case ixgbe_fc_pfc: 2036 case ixgbe_fc_pfc:
@@ -2070,80 +2044,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2070 break; 2044 break;
2071 } 2045 }
2072 2046
2073 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 2047 if (hw->mac.type != ixgbe_mac_X540) {
2074 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 2048 /*
2075 2049 * Enable auto-negotiation between the MAC & PHY;
2076 /* Disable AN timeout */ 2050 * the MAC will advertise clause 37 flow control.
2077 if (hw->fc.strict_ieee) 2051 */
2078 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 2052 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2053 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2079 2054
2080 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 2055 /* Disable AN timeout */
2081 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 2056 if (hw->fc.strict_ieee)
2057 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2082 2058
2083 /* 2059 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2084 * Set up the 10G flow control advertisement registers so the HW 2060 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2085 * can do fc autoneg once the cable is plugged in. If we end up 2061 }
2086 * using 1g instead, this is harmless.
2087 */
2088 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2089 2062
2090 /* 2063 /*
2091 * The possible values of fc.requested_mode are: 2064 * AUTOC restart handles negotiation of 1G and 10G on backplane
2092 * 0: Flow control is completely disabled 2065 * and copper. There is no need to set the PCS1GCTL register.
2093 * 1: Rx flow control is enabled (we can receive pause frames, 2066 *
2094 * but not send pause frames).
2095 * 2: Tx flow control is enabled (we can send pause frames but
2096 * we do not support receiving pause frames).
2097 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2098 * other: Invalid.
2099 */ 2067 */
2100 switch (hw->fc.requested_mode) { 2068 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2101 case ixgbe_fc_none: 2069 reg_bp |= IXGBE_AUTOC_AN_RESTART;
2102 /* Flow control completely disabled by software override. */ 2070 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2103 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); 2071 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2104 break; 2072 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
2105 case ixgbe_fc_rx_pause: 2073 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
2106 /* 2074 MDIO_MMD_AN, reg_cu);
2107 * Rx Flow control is enabled and Tx Flow control is
2108 * disabled by software override. Since there really
2109 * isn't a way to advertise that we are capable of RX
2110 * Pause ONLY, we will advertise that we support both
2111 * symmetric and asymmetric Rx PAUSE. Later, we will
2112 * disable the adapter's ability to send PAUSE frames.
2113 */
2114 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2115 break;
2116 case ixgbe_fc_tx_pause:
2117 /*
2118 * Tx Flow control is enabled, and Rx Flow control is
2119 * disabled by software override.
2120 */
2121 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2122 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2123 break;
2124 case ixgbe_fc_full:
2125 /* Flow control (both Rx and Tx) is enabled by SW override. */
2126 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2127 break;
2128#ifdef CONFIG_DCB
2129 case ixgbe_fc_pfc:
2130 goto out;
2131 break;
2132#endif /* CONFIG_DCB */
2133 default:
2134 hw_dbg(hw, "Flow control param set incorrectly\n");
2135 ret_val = IXGBE_ERR_CONFIG;
2136 goto out;
2137 break;
2138 } 2075 }
2139 /*
2140 * AUTOC restart handles negotiation of 1G and 10G. There is
2141 * no need to set the PCS1GCTL register.
2142 */
2143 reg |= IXGBE_AUTOC_AN_RESTART;
2144 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2145 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2146 2076
2077 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2147out: 2078out:
2148 return ret_val; 2079 return ret_val;
2149} 2080}
@@ -2159,10 +2090,16 @@ out:
2159 **/ 2090 **/
2160s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2091s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2161{ 2092{
2093 struct ixgbe_adapter *adapter = hw->back;
2162 u32 i; 2094 u32 i;
2163 u32 reg_val; 2095 u32 reg_val;
2164 u32 number_of_queues; 2096 u32 number_of_queues;
2165 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2097 s32 status = 0;
2098 u16 dev_status = 0;
2099
2100 /* Just jump out if bus mastering is already disabled */
2101 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2102 goto out;
2166 2103
2167 /* Disable the receive unit by stopping each queue */ 2104 /* Disable the receive unit by stopping each queue */
2168 number_of_queues = hw->mac.max_rx_queues; 2105 number_of_queues = hw->mac.max_rx_queues;
@@ -2179,13 +2116,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2179 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 2116 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2180 2117
2181 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2118 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2182 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { 2119 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2183 status = 0; 2120 goto check_device_status;
2121 udelay(100);
2122 }
2123
2124 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
2125 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2126
2127 /*
2128 * Before proceeding, make sure that the PCIe block does not have
2129 * transactions pending.
2130 */
2131check_device_status:
2132 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2133 pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
2134 &dev_status);
2135 if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2184 break; 2136 break;
2185 }
2186 udelay(100); 2137 udelay(100);
2187 } 2138 }
2188 2139
2140 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
2141 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2142 else
2143 goto out;
2144
2145 /*
2146 * Two consecutive resets are required via CTRL.RST per datasheet
2147 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2148 * of this need. The first reset prevents new master requests from
2149 * being issued by our device. We then must wait 1usec for any
2150 * remaining completions from the PCIe bus to trickle in, and then reset
2151 * again to clear out any effects they may have had on our device.
2152 */
2153 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2154
2155out:
2189 return status; 2156 return status;
2190} 2157}
2191 2158
@@ -2195,7 +2162,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2195 * @hw: pointer to hardware structure 2162 * @hw: pointer to hardware structure
2196 * @mask: Mask to specify which semaphore to acquire 2163 * @mask: Mask to specify which semaphore to acquire
2197 * 2164 *
2198 * Acquires the SWFW semaphore thought the GSSR register for the specified 2165 * Acquires the SWFW semaphore through the GSSR register for the specified
2199 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2166 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2200 **/ 2167 **/
2201s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2168s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2206,6 +2173,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2206 s32 timeout = 200; 2173 s32 timeout = 200;
2207 2174
2208 while (timeout) { 2175 while (timeout) {
2176 /*
2177 * SW EEPROM semaphore bit is used for access to all
2178 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2179 */
2209 if (ixgbe_get_eeprom_semaphore(hw)) 2180 if (ixgbe_get_eeprom_semaphore(hw))
2210 return IXGBE_ERR_SWFW_SYNC; 2181 return IXGBE_ERR_SWFW_SYNC;
2211 2182
@@ -2223,7 +2194,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2223 } 2194 }
2224 2195
2225 if (!timeout) { 2196 if (!timeout) {
2226 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); 2197 hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
2227 return IXGBE_ERR_SWFW_SYNC; 2198 return IXGBE_ERR_SWFW_SYNC;
2228 } 2199 }
2229 2200
@@ -2239,7 +2210,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2239 * @hw: pointer to hardware structure 2210 * @hw: pointer to hardware structure
2240 * @mask: Mask to specify which semaphore to release 2211 * @mask: Mask to specify which semaphore to release
2241 * 2212 *
2242 * Releases the SWFW semaphore thought the GSSR register for the specified 2213 * Releases the SWFW semaphore through the GSSR register for the specified
2243 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2214 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2244 **/ 2215 **/
2245void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2216void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2427,37 +2398,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2427 u32 mpsar_lo, mpsar_hi; 2398 u32 mpsar_lo, mpsar_hi;
2428 u32 rar_entries = hw->mac.num_rar_entries; 2399 u32 rar_entries = hw->mac.num_rar_entries;
2429 2400
2430 if (rar < rar_entries) { 2401 /* Make sure we are using a valid rar index range */
2431 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2402 if (rar >= rar_entries) {
2432 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2403 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2404 return IXGBE_ERR_INVALID_ARGUMENT;
2405 }
2433 2406
2434 if (!mpsar_lo && !mpsar_hi) 2407 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2435 goto done; 2408 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2436 2409
2437 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2410 if (!mpsar_lo && !mpsar_hi)
2438 if (mpsar_lo) { 2411 goto done;
2439 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2440 mpsar_lo = 0;
2441 }
2442 if (mpsar_hi) {
2443 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2444 mpsar_hi = 0;
2445 }
2446 } else if (vmdq < 32) {
2447 mpsar_lo &= ~(1 << vmdq);
2448 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2449 } else {
2450 mpsar_hi &= ~(1 << (vmdq - 32));
2451 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2452 }
2453 2412
2454 /* was that the last pool using this rar? */ 2413 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2455 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2414 if (mpsar_lo) {
2456 hw->mac.ops.clear_rar(hw, rar); 2415 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2416 mpsar_lo = 0;
2417 }
2418 if (mpsar_hi) {
2419 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2420 mpsar_hi = 0;
2421 }
2422 } else if (vmdq < 32) {
2423 mpsar_lo &= ~(1 << vmdq);
2424 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2457 } else { 2425 } else {
2458 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2426 mpsar_hi &= ~(1 << (vmdq - 32));
2427 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2459 } 2428 }
2460 2429
2430 /* was that the last pool using this rar? */
2431 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2432 hw->mac.ops.clear_rar(hw, rar);
2461done: 2433done:
2462 return 0; 2434 return 0;
2463} 2435}
@@ -2473,18 +2445,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2473 u32 mpsar; 2445 u32 mpsar;
2474 u32 rar_entries = hw->mac.num_rar_entries; 2446 u32 rar_entries = hw->mac.num_rar_entries;
2475 2447
2476 if (rar < rar_entries) { 2448 /* Make sure we are using a valid rar index range */
2477 if (vmdq < 32) { 2449 if (rar >= rar_entries) {
2478 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2479 mpsar |= 1 << vmdq;
2480 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2481 } else {
2482 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2483 mpsar |= 1 << (vmdq - 32);
2484 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2485 }
2486 } else {
2487 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2450 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2451 return IXGBE_ERR_INVALID_ARGUMENT;
2452 }
2453
2454 if (vmdq < 32) {
2455 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2456 mpsar |= 1 << vmdq;
2457 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2458 } else {
2459 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2460 mpsar |= 1 << (vmdq - 32);
2461 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2488 } 2462 }
2489 return 0; 2463 return 0;
2490} 2464}
@@ -2497,7 +2471,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2497{ 2471{
2498 int i; 2472 int i;
2499 2473
2500
2501 for (i = 0; i < 128; i++) 2474 for (i = 0; i < 128; i++)
2502 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 2475 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2503 2476
@@ -2726,12 +2699,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
2726 * Reads the links register to determine if link is up and the current speed 2699 * Reads the links register to determine if link is up and the current speed
2727 **/ 2700 **/
2728s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 2701s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2729 bool *link_up, bool link_up_wait_to_complete) 2702 bool *link_up, bool link_up_wait_to_complete)
2730{ 2703{
2731 u32 links_reg; 2704 u32 links_reg, links_orig;
2732 u32 i; 2705 u32 i;
2733 2706
2707 /* clear the old state */
2708 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
2709
2734 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 2710 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2711
2712 if (links_orig != links_reg) {
2713 hw_dbg(hw, "LINKS changed from %08X to %08X\n",
2714 links_orig, links_reg);
2715 }
2716
2735 if (link_up_wait_to_complete) { 2717 if (link_up_wait_to_complete) {
2736 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 2718 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
2737 if (links_reg & IXGBE_LINKS_UP) { 2719 if (links_reg & IXGBE_LINKS_UP) {
@@ -2754,10 +2736,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2754 IXGBE_LINKS_SPEED_10G_82599) 2736 IXGBE_LINKS_SPEED_10G_82599)
2755 *speed = IXGBE_LINK_SPEED_10GB_FULL; 2737 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2756 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 2738 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2757 IXGBE_LINKS_SPEED_1G_82599) 2739 IXGBE_LINKS_SPEED_1G_82599)
2758 *speed = IXGBE_LINK_SPEED_1GB_FULL; 2740 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2759 else 2741 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2742 IXGBE_LINKS_SPEED_100_82599)
2760 *speed = IXGBE_LINK_SPEED_100_FULL; 2743 *speed = IXGBE_LINK_SPEED_100_FULL;
2744 else
2745 *speed = IXGBE_LINK_SPEED_UNKNOWN;
2761 2746
2762 /* if link is down, zero out the current_mode */ 2747 /* if link is down, zero out the current_mode */
2763 if (*link_up == false) { 2748 if (*link_up == false) {
@@ -2814,6 +2799,28 @@ wwn_prefix_out:
2814} 2799}
2815 2800
2816/** 2801/**
2802 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
2803 * control
2804 * @hw: pointer to hardware structure
2805 *
2806 * There are several phys that do not support autoneg flow control. This
2807 * function check the device id to see if the associated phy supports
2808 * autoneg flow control.
2809 **/
2810static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
2811{
2812
2813 switch (hw->device_id) {
2814 case IXGBE_DEV_ID_X540T:
2815 return 0;
2816 case IXGBE_DEV_ID_82599_T3_LOM:
2817 return 0;
2818 default:
2819 return IXGBE_ERR_FC_NOT_SUPPORTED;
2820 }
2821}
2822
2823/**
2817 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 2824 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
2818 * @hw: pointer to hardware structure 2825 * @hw: pointer to hardware structure
2819 * @enable: enable or disable switch for anti-spoofing 2826 * @enable: enable or disable switch for anti-spoofing
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 66ed045a8cf0..508f635fc2ca 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,7 @@
29#define _IXGBE_COMMON_H_ 29#define _IXGBE_COMMON_H_
30 30
31#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#include "ixgbe.h"
32 33
33u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); 34u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
34s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -62,8 +63,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
62s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 63s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
63s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, 64s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
64 struct net_device *netdev); 65 struct net_device *netdev);
65s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
66 struct net_device *netdev);
67s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 66s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
68s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 67s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
69s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 68s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
@@ -110,9 +109,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
110 109
111#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) 110#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
112 111
113extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
114#define hw_dbg(hw, format, arg...) \ 112#define hw_dbg(hw, format, arg...) \
115 netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg) 113 netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
116#define e_dev_info(format, arg...) \ 114#define e_dev_info(format, arg...) \
117 dev_info(&adapter->pdev->dev, format, ## arg) 115 dev_info(&adapter->pdev->dev, format, ## arg)
118#define e_dev_warn(format, arg...) \ 116#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index d16c260c1f50..41c529fac0ab 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,42 @@
34#include "ixgbe_dcb_82599.h" 34#include "ixgbe_dcb_82599.h"
35 35
36/** 36/**
37 * ixgbe_ieee_credits - This calculates the ieee traffic class
38 * credits from the configured bandwidth percentages. Credits
39 * are the smallest unit programable into the underlying
40 * hardware. The IEEE 802.1Qaz specification do not use bandwidth
41 * groups so this is much simplified from the CEE case.
42 */
43s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
44{
45 int min_percent = 100;
46 int min_credit, multiplier;
47 int i;
48
49 min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
50 DCB_CREDIT_QUANTUM;
51
52 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
53 if (bw[i] < min_percent && bw[i])
54 min_percent = bw[i];
55 }
56
57 multiplier = (min_credit / min_percent) + 1;
58
59 /* Find out the hw credits for each TC */
60 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
61 int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
62
63 if (val < min_credit)
64 val = min_credit;
65 refill[i] = val;
66
67 max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
68 }
69 return 0;
70}
71
72/**
37 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits 73 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
38 * @ixgbe_dcb_config: Struct containing DCB settings. 74 * @ixgbe_dcb_config: Struct containing DCB settings.
39 * @direction: Configuring either Tx or Rx. 75 * @direction: Configuring either Tx or Rx.
@@ -141,6 +177,59 @@ out:
141 return ret_val; 177 return ret_val;
142} 178}
143 179
180void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
181{
182 int i;
183
184 *pfc_en = 0;
185 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
186 *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
187}
188
189void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
190 u16 *refill)
191{
192 struct tc_bw_alloc *p;
193 int i;
194
195 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
196 p = &cfg->tc_config[i].path[direction];
197 refill[i] = p->data_credits_refill;
198 }
199}
200
201void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
202{
203 int i;
204
205 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
206 max[i] = cfg->tc_config[i].desc_credits_max;
207}
208
209void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
210 u8 *bwgid)
211{
212 struct tc_bw_alloc *p;
213 int i;
214
215 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
216 p = &cfg->tc_config[i].path[direction];
217 bwgid[i] = p->bwg_id;
218 }
219}
220
221void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
222 u8 *ptype)
223{
224 struct tc_bw_alloc *p;
225 int i;
226
227 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
228 p = &cfg->tc_config[i].path[direction];
229 ptype[i] = p->prio_type;
230 }
231}
232
144/** 233/**
145 * ixgbe_dcb_hw_config - Config and enable DCB 234 * ixgbe_dcb_hw_config - Config and enable DCB
146 * @hw: pointer to hardware structure 235 * @hw: pointer to hardware structure
@@ -152,13 +241,32 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
152 struct ixgbe_dcb_config *dcb_config) 241 struct ixgbe_dcb_config *dcb_config)
153{ 242{
154 s32 ret = 0; 243 s32 ret = 0;
244 u8 pfc_en;
245 u8 ptype[MAX_TRAFFIC_CLASS];
246 u8 bwgid[MAX_TRAFFIC_CLASS];
247 u16 refill[MAX_TRAFFIC_CLASS];
248 u16 max[MAX_TRAFFIC_CLASS];
249 /* CEE does not define a priority to tc mapping so map 1:1 */
250 u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
251
252 /* Unpack CEE standard containers */
253 ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
254 ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
255 ixgbe_dcb_unpack_max(dcb_config, max);
256 ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
257 ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
258
155 switch (hw->mac.type) { 259 switch (hw->mac.type) {
156 case ixgbe_mac_82598EB: 260 case ixgbe_mac_82598EB:
157 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); 261 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
262 pfc_en, refill, max, bwgid,
263 ptype);
158 break; 264 break;
159 case ixgbe_mac_82599EB: 265 case ixgbe_mac_82599EB:
160 case ixgbe_mac_X540: 266 case ixgbe_mac_X540:
161 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); 267 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
268 pfc_en, refill, max, bwgid,
269 ptype, prio_tc);
162 break; 270 break;
163 default: 271 default:
164 break; 272 break;
@@ -166,3 +274,49 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
166 return ret; 274 return ret;
167} 275}
168 276
277/* Helper routines to abstract HW specifics from DCB netlink ops */
278s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
279{
280 int ret = -EINVAL;
281
282 switch (hw->mac.type) {
283 case ixgbe_mac_82598EB:
284 ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
285 break;
286 case ixgbe_mac_82599EB:
287 case ixgbe_mac_X540:
288 ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
289 break;
290 default:
291 break;
292 }
293 return ret;
294}
295
296s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
297 u16 *refill, u16 *max, u8 *bwg_id,
298 u8 *prio_type, u8 *prio_tc)
299{
300 switch (hw->mac.type) {
301 case ixgbe_mac_82598EB:
302 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
303 prio_type);
304 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
305 bwg_id, prio_type);
306 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
307 bwg_id, prio_type);
308 break;
309 case ixgbe_mac_82599EB:
310 case ixgbe_mac_X540:
311 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
312 bwg_id, prio_type, prio_tc);
313 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
314 bwg_id, prio_type);
315 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
316 prio_type, prio_tc);
317 break;
318 default:
319 break;
320 }
321 return 0;
322}
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 1cfe38ee1644..944838fc7b59 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; 139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ 140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
141 bool pfc_mode_enable; 141 bool pfc_mode_enable;
142 bool round_robin_enable;
143 142
144 enum dcb_rx_pba_cfg rx_pba_cfg; 143 enum dcb_rx_pba_cfg rx_pba_cfg;
145 144
@@ -148,12 +147,21 @@ struct ixgbe_dcb_config {
148}; 147};
149 148
150/* DCB driver APIs */ 149/* DCB driver APIs */
150void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
151void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
152void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
153void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
154void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
151 155
152/* DCB credits calculation */ 156/* DCB credits calculation */
157s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, 158s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
154 struct ixgbe_dcb_config *, int, u8); 159 struct ixgbe_dcb_config *, int, u8);
155 160
156/* DCB hw initialization */ 161/* DCB hw initialization */
162s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
163 u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
164s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
157s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 165s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
158 166
159/* DCB definitions for credit calculation */ 167/* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 9a5e89c12e05..1bc57e52cee3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -38,15 +38,14 @@
38 * 38 *
39 * Configure packet buffers for DCB mode. 39 * Configure packet buffers for DCB mode.
40 */ 40 */
41static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, 41static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
42 struct ixgbe_dcb_config *dcb_config)
43{ 42{
44 s32 ret_val = 0; 43 s32 ret_val = 0;
45 u32 value = IXGBE_RXPBSIZE_64KB; 44 u32 value = IXGBE_RXPBSIZE_64KB;
46 u8 i = 0; 45 u8 i = 0;
47 46
48 /* Setup Rx packet buffer sizes */ 47 /* Setup Rx packet buffer sizes */
49 switch (dcb_config->rx_pba_cfg) { 48 switch (rx_pba) {
50 case pba_80_48: 49 case pba_80_48:
51 /* Setup the first four at 80KB */ 50 /* Setup the first four at 80KB */
52 value = IXGBE_RXPBSIZE_80KB; 51 value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
78 * 77 *
79 * Configure Rx Data Arbiter and credits for each traffic class. 78 * Configure Rx Data Arbiter and credits for each traffic class.
80 */ 79 */
81static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, 80s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
82 struct ixgbe_dcb_config *dcb_config) 81 u16 *refill,
82 u16 *max,
83 u8 *prio_type)
83{ 84{
84 struct tc_bw_alloc *p;
85 u32 reg = 0; 85 u32 reg = 0;
86 u32 credit_refill = 0; 86 u32 credit_refill = 0;
87 u32 credit_max = 0; 87 u32 credit_max = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
102 102
103 /* Configure traffic class credits and priority */ 103 /* Configure traffic class credits and priority */
104 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 104 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
105 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; 105 credit_refill = refill[i];
106 credit_refill = p->data_credits_refill; 106 credit_max = max[i];
107 credit_max = p->data_credits_max;
108 107
109 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); 108 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
110 109
111 if (p->prio_type == prio_link) 110 if (prio_type[i] == prio_link)
112 reg |= IXGBE_RT2CR_LSP; 111 reg |= IXGBE_RT2CR_LSP;
113 112
114 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); 113 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
135 * 134 *
136 * Configure Tx Descriptor Arbiter and credits for each traffic class. 135 * Configure Tx Descriptor Arbiter and credits for each traffic class.
137 */ 136 */
138static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, 137s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
139 struct ixgbe_dcb_config *dcb_config) 138 u16 *refill,
139 u16 *max,
140 u8 *bwg_id,
141 u8 *prio_type)
140{ 142{
141 struct tc_bw_alloc *p;
142 u32 reg, max_credits; 143 u32 reg, max_credits;
143 u8 i; 144 u8 i;
144 145
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
146 147
147 /* Enable arbiter */ 148 /* Enable arbiter */
148 reg &= ~IXGBE_DPMCS_ARBDIS; 149 reg &= ~IXGBE_DPMCS_ARBDIS;
149 if (!(dcb_config->round_robin_enable)) { 150 /* Enable DFP and Recycle mode */
150 /* Enable DFP and Recycle mode */ 151 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
151 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
152 }
153 reg |= IXGBE_DPMCS_TSOEF; 152 reg |= IXGBE_DPMCS_TSOEF;
154 /* Configure Max TSO packet size 34KB including payload and headers */ 153 /* Configure Max TSO packet size 34KB including payload and headers */
155 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); 154 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
158 157
159 /* Configure traffic class credits and priority */ 158 /* Configure traffic class credits and priority */
160 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 159 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
161 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 160 max_credits = max[i];
162 max_credits = dcb_config->tc_config[i].desc_credits_max;
163 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; 161 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
164 reg |= p->data_credits_refill; 162 reg |= refill[i];
165 reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT; 163 reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
166 164
167 if (p->prio_type == prio_group) 165 if (prio_type[i] == prio_group)
168 reg |= IXGBE_TDTQ2TCCR_GSP; 166 reg |= IXGBE_TDTQ2TCCR_GSP;
169 167
170 if (p->prio_type == prio_link) 168 if (prio_type[i] == prio_link)
171 reg |= IXGBE_TDTQ2TCCR_LSP; 169 reg |= IXGBE_TDTQ2TCCR_LSP;
172 170
173 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); 171 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
183 * 181 *
184 * Configure Tx Data Arbiter and credits for each traffic class. 182 * Configure Tx Data Arbiter and credits for each traffic class.
185 */ 183 */
186static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, 184s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
187 struct ixgbe_dcb_config *dcb_config) 185 u16 *refill,
186 u16 *max,
187 u8 *bwg_id,
188 u8 *prio_type)
188{ 189{
189 struct tc_bw_alloc *p;
190 u32 reg; 190 u32 reg;
191 u8 i; 191 u8 i;
192 192
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
200 200
201 /* Configure traffic class credits and priority */ 201 /* Configure traffic class credits and priority */
202 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 202 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
203 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 203 reg = refill[i];
204 reg = p->data_credits_refill; 204 reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
205 reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT; 205 reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
206 reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
207 206
208 if (p->prio_type == prio_group) 207 if (prio_type[i] == prio_group)
209 reg |= IXGBE_TDPT2TCCR_GSP; 208 reg |= IXGBE_TDPT2TCCR_GSP;
210 209
211 if (p->prio_type == prio_link) 210 if (prio_type[i] == prio_link)
212 reg |= IXGBE_TDPT2TCCR_LSP; 211 reg |= IXGBE_TDPT2TCCR_LSP;
213 212
214 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); 213 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,59 +228,57 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
229 * 228 *
230 * Configure Priority Flow Control for each traffic class. 229 * Configure Priority Flow Control for each traffic class.
231 */ 230 */
232s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, 231s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
233 struct ixgbe_dcb_config *dcb_config)
234{ 232{
235 u32 reg, rx_pba_size; 233 u32 reg, rx_pba_size;
236 u8 i; 234 u8 i;
237 235
238 if (!dcb_config->pfc_mode_enable) 236 if (pfc_en) {
239 goto out; 237 /* Enable Transmit Priority Flow Control */
240 238 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
241 /* Enable Transmit Priority Flow Control */ 239 reg &= ~IXGBE_RMCS_TFCE_802_3X;
242 reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 240 /* correct the reporting of our flow control status */
243 reg &= ~IXGBE_RMCS_TFCE_802_3X; 241 reg |= IXGBE_RMCS_TFCE_PRIORITY;
244 /* correct the reporting of our flow control status */ 242 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
245 reg |= IXGBE_RMCS_TFCE_PRIORITY; 243
246 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); 244 /* Enable Receive Priority Flow Control */
247 245 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
248 /* Enable Receive Priority Flow Control */ 246 reg &= ~IXGBE_FCTRL_RFCE;
249 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 247 reg |= IXGBE_FCTRL_RPFCE;
250 reg &= ~IXGBE_FCTRL_RFCE; 248 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
251 reg |= IXGBE_FCTRL_RPFCE; 249
252 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); 250 /* Configure pause time */
251 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
252 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
253
254 /* Configure flow control refresh threshold value */
255 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
256 }
253 257
254 /* 258 /*
255 * Configure flow control thresholds and enable priority flow control 259 * Configure flow control thresholds and enable priority flow control
256 * for each traffic class. 260 * for each traffic class.
257 */ 261 */
258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 262 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
263 int enabled = pfc_en & (1 << i);
259 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 264 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
260 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; 265 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
261 reg = (rx_pba_size - hw->fc.low_water) << 10; 266 reg = (rx_pba_size - hw->fc.low_water) << 10;
262 267
263 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 268 if (enabled == pfc_enabled_tx ||
264 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 269 enabled == pfc_enabled_full)
265 reg |= IXGBE_FCRTL_XONE; 270 reg |= IXGBE_FCRTL_XONE;
266 271
267 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); 272 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
268 273
269 reg = (rx_pba_size - hw->fc.high_water) << 10; 274 reg = (rx_pba_size - hw->fc.high_water) << 10;
270 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 275 if (enabled == pfc_enabled_tx ||
271 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 276 enabled == pfc_enabled_full)
272 reg |= IXGBE_FCRTH_FCEN; 277 reg |= IXGBE_FCRTH_FCEN;
273 278
274 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); 279 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
275 } 280 }
276 281
277 /* Configure pause time */
278 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
279 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
280
281 /* Configure flow control refresh threshold value */
282 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
283
284out:
285 return 0; 282 return 0;
286} 283}
287 284
@@ -292,7 +289,7 @@ out:
292 * Configure queue statistics registers, all queues belonging to same traffic 289 * Configure queue statistics registers, all queues belonging to same traffic
293 * class uses a single set of queue statistics counters. 290 * class uses a single set of queue statistics counters.
294 */ 291 */
295static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) 292s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
296{ 293{
297 u32 reg = 0; 294 u32 reg = 0;
298 u8 i = 0; 295 u8 i = 0;
@@ -325,13 +322,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
325 * Configure dcb settings and enable dcb mode. 322 * Configure dcb settings and enable dcb mode.
326 */ 323 */
327s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, 324s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
328 struct ixgbe_dcb_config *dcb_config) 325 u8 rx_pba, u8 pfc_en, u16 *refill,
326 u16 *max, u8 *bwg_id, u8 *prio_type)
329{ 327{
330 ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config); 328 ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
331 ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); 329 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
332 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); 330 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
333 ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); 331 bwg_id, prio_type);
334 ixgbe_dcb_config_pfc_82598(hw, dcb_config); 332 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
333 bwg_id, prio_type);
334 ixgbe_dcb_config_pfc_82598(hw, pfc_en);
335 ixgbe_dcb_config_tc_stats_82598(hw); 335 ixgbe_dcb_config_tc_stats_82598(hw);
336 336
337 return 0; 337 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index abc03ccfa088..1e9750c2b46b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -71,9 +71,28 @@
71/* DCB hardware-specific driver APIs */ 71/* DCB hardware-specific driver APIs */
72 72
73/* DCB PFC functions */ 73/* DCB PFC functions */
74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
75 75
76/* DCB hw initialization */ 76/* DCB hw initialization */
77s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 77s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
78 u16 *refill,
79 u16 *max,
80 u8 *prio_type);
81
82s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
83 u16 *refill,
84 u16 *max,
85 u8 *bwg_id,
86 u8 *prio_type);
87
88s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
89 u16 *refill,
90 u16 *max,
91 u8 *bwg_id,
92 u8 *prio_type);
93
94s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
95 u8 rx_pba, u8 pfc_en, u16 *refill,
96 u16 *max, u8 *bwg_id, u8 *prio_type);
78 97
79#endif /* _DCB_82598_CONFIG_H */ 98#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 374e1f74d0f5..025af8c53ddb 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -33,19 +33,18 @@
33/** 33/**
34 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers 34 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
35 * @hw: pointer to hardware structure 35 * @hw: pointer to hardware structure
36 * @dcb_config: pointer to ixgbe_dcb_config structure 36 * @rx_pba: method to distribute packet buffer
37 * 37 *
38 * Configure packet buffers for DCB mode. 38 * Configure packet buffers for DCB mode.
39 */ 39 */
40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, 40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
41 struct ixgbe_dcb_config *dcb_config)
42{ 41{
43 s32 ret_val = 0; 42 s32 ret_val = 0;
44 u32 value = IXGBE_RXPBSIZE_64KB; 43 u32 value = IXGBE_RXPBSIZE_64KB;
45 u8 i = 0; 44 u8 i = 0;
46 45
47 /* Setup Rx packet buffer sizes */ 46 /* Setup Rx packet buffer sizes */
48 switch (dcb_config->rx_pba_cfg) { 47 switch (rx_pba) {
49 case pba_80_48: 48 case pba_80_48:
50 /* Setup the first four at 80KB */ 49 /* Setup the first four at 80KB */
51 value = IXGBE_RXPBSIZE_80KB; 50 value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,20 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
75/** 74/**
76 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter 75 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
77 * @hw: pointer to hardware structure 76 * @hw: pointer to hardware structure
78 * @dcb_config: pointer to ixgbe_dcb_config structure 77 * @refill: refill credits index by traffic class
78 * @max: max credits index by traffic class
79 * @bwg_id: bandwidth grouping indexed by traffic class
80 * @prio_type: priority type indexed by traffic class
79 * 81 *
80 * Configure Rx Packet Arbiter and credits for each traffic class. 82 * Configure Rx Packet Arbiter and credits for each traffic class.
81 */ 83 */
82static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, 84s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
83 struct ixgbe_dcb_config *dcb_config) 85 u16 *refill,
86 u16 *max,
87 u8 *bwg_id,
88 u8 *prio_type,
89 u8 *prio_tc)
84{ 90{
85 struct tc_bw_alloc *p;
86 u32 reg = 0; 91 u32 reg = 0;
87 u32 credit_refill = 0; 92 u32 credit_refill = 0;
88 u32 credit_max = 0; 93 u32 credit_max = 0;
@@ -98,20 +103,18 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
98 /* Map all traffic classes to their UP, 1 to 1 */ 103 /* Map all traffic classes to their UP, 1 to 1 */
99 reg = 0; 104 reg = 0;
100 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 105 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
101 reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT)); 106 reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
102 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); 107 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
103 108
104 /* Configure traffic class credits and priority */ 109 /* Configure traffic class credits and priority */
105 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 110 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
106 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; 111 credit_refill = refill[i];
107 112 credit_max = max[i];
108 credit_refill = p->data_credits_refill;
109 credit_max = p->data_credits_max;
110 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); 113 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
111 114
112 reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT; 115 reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
113 116
114 if (p->prio_type == prio_link) 117 if (prio_type[i] == prio_link)
115 reg |= IXGBE_RTRPT4C_LSP; 118 reg |= IXGBE_RTRPT4C_LSP;
116 119
117 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); 120 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +133,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
130/** 133/**
131 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter 134 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
132 * @hw: pointer to hardware structure 135 * @hw: pointer to hardware structure
133 * @dcb_config: pointer to ixgbe_dcb_config structure 136 * @refill: refill credits index by traffic class
137 * @max: max credits index by traffic class
138 * @bwg_id: bandwidth grouping indexed by traffic class
139 * @prio_type: priority type indexed by traffic class
134 * 140 *
135 * Configure Tx Descriptor Arbiter and credits for each traffic class. 141 * Configure Tx Descriptor Arbiter and credits for each traffic class.
136 */ 142 */
137static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, 143s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
138 struct ixgbe_dcb_config *dcb_config) 144 u16 *refill,
145 u16 *max,
146 u8 *bwg_id,
147 u8 *prio_type)
139{ 148{
140 struct tc_bw_alloc *p;
141 u32 reg, max_credits; 149 u32 reg, max_credits;
142 u8 i; 150 u8 i;
143 151
@@ -149,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
149 157
150 /* Configure traffic class credits and priority */ 158 /* Configure traffic class credits and priority */
151 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 159 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
152 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 160 max_credits = max[i];
153 max_credits = dcb_config->tc_config[i].desc_credits_max;
154 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; 161 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
155 reg |= p->data_credits_refill; 162 reg |= refill[i];
156 reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT; 163 reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
157 164
158 if (p->prio_type == prio_group) 165 if (prio_type[i] == prio_group)
159 reg |= IXGBE_RTTDT2C_GSP; 166 reg |= IXGBE_RTTDT2C_GSP;
160 167
161 if (p->prio_type == prio_link) 168 if (prio_type[i] == prio_link)
162 reg |= IXGBE_RTTDT2C_LSP; 169 reg |= IXGBE_RTTDT2C_LSP;
163 170
164 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); 171 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +184,20 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
177/** 184/**
178 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter 185 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
179 * @hw: pointer to hardware structure 186 * @hw: pointer to hardware structure
180 * @dcb_config: pointer to ixgbe_dcb_config structure 187 * @refill: refill credits index by traffic class
188 * @max: max credits index by traffic class
189 * @bwg_id: bandwidth grouping indexed by traffic class
190 * @prio_type: priority type indexed by traffic class
181 * 191 *
182 * Configure Tx Packet Arbiter and credits for each traffic class. 192 * Configure Tx Packet Arbiter and credits for each traffic class.
183 */ 193 */
184static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, 194s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
185 struct ixgbe_dcb_config *dcb_config) 195 u16 *refill,
196 u16 *max,
197 u8 *bwg_id,
198 u8 *prio_type,
199 u8 *prio_tc)
186{ 200{
187 struct tc_bw_alloc *p;
188 u32 reg; 201 u32 reg;
189 u8 i; 202 u8 i;
190 203
@@ -200,20 +213,19 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
200 /* Map all traffic classes to their UP, 1 to 1 */ 213 /* Map all traffic classes to their UP, 1 to 1 */
201 reg = 0; 214 reg = 0;
202 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 215 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
203 reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT)); 216 reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
204 IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); 217 IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
205 218
206 /* Configure traffic class credits and priority */ 219 /* Configure traffic class credits and priority */
207 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 220 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
208 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 221 reg = refill[i];
209 reg = p->data_credits_refill; 222 reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
210 reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT; 223 reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
211 reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
212 224
213 if (p->prio_type == prio_group) 225 if (prio_type[i] == prio_group)
214 reg |= IXGBE_RTTPT2C_GSP; 226 reg |= IXGBE_RTTPT2C_GSP;
215 227
216 if (p->prio_type == prio_link) 228 if (prio_type[i] == prio_link)
217 reg |= IXGBE_RTTPT2C_LSP; 229 reg |= IXGBE_RTTPT2C_LSP;
218 230
219 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); 231 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,63 +245,59 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
233/** 245/**
234 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control 246 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
235 * @hw: pointer to hardware structure 247 * @hw: pointer to hardware structure
236 * @dcb_config: pointer to ixgbe_dcb_config structure 248 * @pfc_en: enabled pfc bitmask
237 * 249 *
238 * Configure Priority Flow Control (PFC) for each traffic class. 250 * Configure Priority Flow Control (PFC) for each traffic class.
239 */ 251 */
240s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 252s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
241 struct ixgbe_dcb_config *dcb_config)
242{ 253{
243 u32 i, reg, rx_pba_size; 254 u32 i, reg, rx_pba_size;
244 255
245 /* If PFC is disabled globally then fall back to LFC. */
246 if (!dcb_config->pfc_mode_enable) {
247 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
248 hw->mac.ops.fc_enable(hw, i);
249 goto out;
250 }
251
252 /* Configure PFC Tx thresholds per TC */ 256 /* Configure PFC Tx thresholds per TC */
253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 257 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
258 int enabled = pfc_en & (1 << i);
254 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 259 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
255 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; 260 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
256 261
257 reg = (rx_pba_size - hw->fc.low_water) << 10; 262 reg = (rx_pba_size - hw->fc.low_water) << 10;
258 263
259 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 264 if (enabled)
260 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
261 reg |= IXGBE_FCRTL_XONE; 265 reg |= IXGBE_FCRTL_XONE;
262 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); 266 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
263 267
264 reg = (rx_pba_size - hw->fc.high_water) << 10; 268 reg = (rx_pba_size - hw->fc.high_water) << 10;
265 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 269 if (enabled)
266 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
267 reg |= IXGBE_FCRTH_FCEN; 270 reg |= IXGBE_FCRTH_FCEN;
268 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); 271 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
269 } 272 }
270 273
271 /* Configure pause time (2 TCs per register) */ 274 if (pfc_en) {
272 reg = hw->fc.pause_time | (hw->fc.pause_time << 16); 275 /* Configure pause time (2 TCs per register) */
273 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 276 reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
274 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 277 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
275 278 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
276 /* Configure flow control refresh threshold value */ 279
277 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 280 /* Configure flow control refresh threshold value */
278 281 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
279 /* Enable Transmit PFC */ 282
280 reg = IXGBE_FCCFG_TFCE_PRIORITY; 283
281 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); 284 reg = IXGBE_FCCFG_TFCE_PRIORITY;
285 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
286 /*
287 * Enable Receive PFC
288 * We will always honor XOFF frames we receive when
289 * we are in PFC mode.
290 */
291 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
292 reg &= ~IXGBE_MFLCN_RFCE;
293 reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
294 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
295
296 } else {
297 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
298 hw->mac.ops.fc_enable(hw, i);
299 }
282 300
283 /*
284 * Enable Receive PFC
285 * We will always honor XOFF frames we receive when
286 * we are in PFC mode.
287 */
288 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
289 reg &= ~IXGBE_MFLCN_RFCE;
290 reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
291 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
292out:
293 return 0; 301 return 0;
294} 302}
295 303
@@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
349/** 357/**
350 * ixgbe_dcb_config_82599 - Configure general DCB parameters 358 * ixgbe_dcb_config_82599 - Configure general DCB parameters
351 * @hw: pointer to hardware structure 359 * @hw: pointer to hardware structure
352 * @dcb_config: pointer to ixgbe_dcb_config structure
353 * 360 *
354 * Configure general DCB parameters. 361 * Configure general DCB parameters.
355 */ 362 */
@@ -406,19 +413,28 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
406/** 413/**
407 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB 414 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
408 * @hw: pointer to hardware structure 415 * @hw: pointer to hardware structure
409 * @dcb_config: pointer to ixgbe_dcb_config structure 416 * @rx_pba: method to distribute packet buffer
417 * @refill: refill credits index by traffic class
418 * @max: max credits index by traffic class
419 * @bwg_id: bandwidth grouping indexed by traffic class
420 * @prio_type: priority type indexed by traffic class
421 * @pfc_en: enabled pfc bitmask
410 * 422 *
411 * Configure dcb settings and enable dcb mode. 423 * Configure dcb settings and enable dcb mode.
412 */ 424 */
413s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 425s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
414 struct ixgbe_dcb_config *dcb_config) 426 u8 rx_pba, u8 pfc_en, u16 *refill,
427 u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
415{ 428{
416 ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config); 429 ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
417 ixgbe_dcb_config_82599(hw); 430 ixgbe_dcb_config_82599(hw);
418 ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config); 431 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
419 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config); 432 prio_type, prio_tc);
420 ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config); 433 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
421 ixgbe_dcb_config_pfc_82599(hw, dcb_config); 434 bwg_id, prio_type);
435 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
436 bwg_id, prio_type, prio_tc);
437 ixgbe_dcb_config_pfc_82599(hw, pfc_en);
422 ixgbe_dcb_config_tc_stats_82599(hw); 438 ixgbe_dcb_config_tc_stats_82599(hw);
423 439
424 return 0; 440 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 3841649fb954..148fd8b477a9 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -102,11 +102,32 @@
102/* DCB hardware-specific driver APIs */ 102/* DCB hardware-specific driver APIs */
103 103
104/* DCB PFC functions */ 104/* DCB PFC functions */
105s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 105s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
106 struct ixgbe_dcb_config *dcb_config);
107 106
108/* DCB hw initialization */ 107/* DCB hw initialization */
108s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
109 u16 *refill,
110 u16 *max,
111 u8 *bwg_id,
112 u8 *prio_type,
113 u8 *prio_tc);
114
115s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
116 u16 *refill,
117 u16 *max,
118 u8 *bwg_id,
119 u8 *prio_type);
120
121s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
122 u16 *refill,
123 u16 *max,
124 u8 *bwg_id,
125 u8 *prio_type,
126 u8 *prio_tc);
127
109s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 128s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
110 struct ixgbe_dcb_config *config); 129 u8 rx_pba, u8 pfc_en, u16 *refill,
130 u16 *max, u8 *bwg_id, u8 *prio_type,
131 u8 *prio_tc);
111 132
112#endif /* _DCB_82599_CONFIG_H */ 133#endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bf566e8a455e..fec4c724c37a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
37#define BIT_PG_RX 0x04 37#define BIT_PG_RX 0x04
38#define BIT_PG_TX 0x08 38#define BIT_PG_TX 0x08
39#define BIT_APP_UPCHG 0x10 39#define BIT_APP_UPCHG 0x10
40#define BIT_RESETLINK 0x40
41#define BIT_LINKSPEED 0x80 40#define BIT_LINKSPEED 0x80
42 41
43/* Responses for the DCB_C_SET_ALL command */ 42/* Responses for the DCB_C_SET_ALL command */
@@ -130,7 +129,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
130 netdev->netdev_ops->ndo_stop(netdev); 129 netdev->netdev_ops->ndo_stop(netdev);
131 ixgbe_clear_interrupt_scheme(adapter); 130 ixgbe_clear_interrupt_scheme(adapter);
132 131
133 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
134 switch (adapter->hw.mac.type) { 132 switch (adapter->hw.mac.type) {
135 case ixgbe_mac_82598EB: 133 case ixgbe_mac_82598EB:
136 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 134 adapter->last_lfc_mode = adapter->hw.fc.current_mode;
@@ -146,6 +144,9 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
146 } 144 }
147 145
148 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 146 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
147 if (!netdev_get_num_tc(netdev))
148 ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
149
149 ixgbe_init_interrupt_scheme(adapter); 150 ixgbe_init_interrupt_scheme(adapter);
150 if (netif_running(netdev)) 151 if (netif_running(netdev))
151 netdev->netdev_ops->ndo_open(netdev); 152 netdev->netdev_ops->ndo_open(netdev);
@@ -160,7 +161,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
160 adapter->temp_dcb_cfg.pfc_mode_enable = false; 161 adapter->temp_dcb_cfg.pfc_mode_enable = false;
161 adapter->dcb_cfg.pfc_mode_enable = false; 162 adapter->dcb_cfg.pfc_mode_enable = false;
162 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 163 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
163 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
164 switch (adapter->hw.mac.type) { 164 switch (adapter->hw.mac.type) {
165 case ixgbe_mac_82599EB: 165 case ixgbe_mac_82599EB:
166 case ixgbe_mac_X540: 166 case ixgbe_mac_X540:
@@ -170,6 +170,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
170 break; 170 break;
171 } 171 }
172 172
173 ixgbe_setup_tc(netdev, 0);
174
173 ixgbe_init_interrupt_scheme(adapter); 175 ixgbe_init_interrupt_scheme(adapter);
174 if (netif_running(netdev)) 176 if (netif_running(netdev))
175 netdev->netdev_ops->ndo_open(netdev); 177 netdev->netdev_ops->ndo_open(netdev);
@@ -225,10 +227,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
225 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != 227 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
226 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || 228 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
227 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != 229 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
228 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) { 230 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
229 adapter->dcb_set_bitmap |= BIT_PG_TX; 231 adapter->dcb_set_bitmap |= BIT_PG_TX;
230 adapter->dcb_set_bitmap |= BIT_RESETLINK;
231 }
232} 232}
233 233
234static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 234static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +239,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
239 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; 239 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
240 240
241 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != 241 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
242 adapter->dcb_cfg.bw_percentage[0][bwg_id]) { 242 adapter->dcb_cfg.bw_percentage[0][bwg_id])
243 adapter->dcb_set_bitmap |= BIT_PG_TX; 243 adapter->dcb_set_bitmap |= BIT_PG_TX;
244 adapter->dcb_set_bitmap |= BIT_RESETLINK;
245 }
246} 244}
247 245
248static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, 246static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +267,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
269 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != 267 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
270 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || 268 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
271 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != 269 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
272 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) { 270 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
273 adapter->dcb_set_bitmap |= BIT_PG_RX; 271 adapter->dcb_set_bitmap |= BIT_PG_RX;
274 adapter->dcb_set_bitmap |= BIT_RESETLINK;
275 }
276} 272}
277 273
278static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 274static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +279,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
283 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; 279 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
284 280
285 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != 281 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
286 adapter->dcb_cfg.bw_percentage[1][bwg_id]) { 282 adapter->dcb_cfg.bw_percentage[1][bwg_id])
287 adapter->dcb_set_bitmap |= BIT_PG_RX; 283 adapter->dcb_set_bitmap |= BIT_PG_RX;
288 adapter->dcb_set_bitmap |= BIT_RESETLINK;
289 }
290} 284}
291 285
292static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, 286static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -355,31 +349,28 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
355 struct ixgbe_adapter *adapter = netdev_priv(netdev); 349 struct ixgbe_adapter *adapter = netdev_priv(netdev);
356 int ret; 350 int ret;
357 351
358 if (!adapter->dcb_set_bitmap) 352 if (!adapter->dcb_set_bitmap ||
353 !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
359 return DCB_NO_HW_CHG; 354 return DCB_NO_HW_CHG;
360 355
361 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, 356 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
362 adapter->ring_feature[RING_F_DCB].indices); 357 MAX_TRAFFIC_CLASS);
363 358
364 if (ret) 359 if (ret)
365 return DCB_NO_HW_CHG; 360 return DCB_NO_HW_CHG;
366 361
367 /* 362 /*
368 * Only take down the adapter if the configuration change 363 * Only take down the adapter if an app change occured. FCoE
369 * requires a reset. 364 * may shuffle tx rings in this case and this can not be done
365 * without a reset currently.
370 */ 366 */
371 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 367 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
372 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 368 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
373 msleep(1); 369 msleep(1);
374 370
375 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 371 if (netif_running(netdev))
376 if (netif_running(netdev)) 372 netdev->netdev_ops->ndo_stop(netdev);
377 netdev->netdev_ops->ndo_stop(netdev); 373 ixgbe_clear_interrupt_scheme(adapter);
378 ixgbe_clear_interrupt_scheme(adapter);
379 } else {
380 if (netif_running(netdev))
381 ixgbe_down(adapter);
382 }
383 } 374 }
384 375
385 if (adapter->dcb_cfg.pfc_mode_enable) { 376 if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +399,53 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
408 } 399 }
409 } 400 }
410 401
411 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 402 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
412 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 403 ixgbe_init_interrupt_scheme(adapter);
413 ixgbe_init_interrupt_scheme(adapter); 404 if (netif_running(netdev))
414 if (netif_running(netdev)) 405 netdev->netdev_ops->ndo_open(netdev);
415 netdev->netdev_ops->ndo_open(netdev);
416 } else {
417 if (netif_running(netdev))
418 ixgbe_up(adapter);
419 }
420 ret = DCB_HW_CHG_RST; 406 ret = DCB_HW_CHG_RST;
421 } else if (adapter->dcb_set_bitmap & BIT_PFC) { 407 }
422 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 408
423 ixgbe_dcb_config_pfc_82598(&adapter->hw, 409 if (adapter->dcb_set_bitmap & BIT_PFC) {
424 &adapter->dcb_cfg); 410 u8 pfc_en;
425 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 411 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
426 ixgbe_dcb_config_pfc_82599(&adapter->hw, 412 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
427 &adapter->dcb_cfg);
428 ret = DCB_HW_CHG; 413 ret = DCB_HW_CHG;
429 } 414 }
415
416 if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
417 u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
418 u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
419 /* Priority to TC mapping in CEE case default to 1:1 */
420 u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
421 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
422
423#ifdef CONFIG_FCOE
424 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
425 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
426#endif
427
428 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
429 max_frame, DCB_TX_CONFIG);
430 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
431 max_frame, DCB_RX_CONFIG);
432
433 ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
434 DCB_TX_CONFIG, refill);
435 ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
436 ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
437 DCB_TX_CONFIG, bwg_id);
438 ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
439 DCB_TX_CONFIG, prio_type);
440
441 ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
442 bwg_id, prio_type, prio_tc);
443 }
444
430 if (adapter->dcb_cfg.pfc_mode_enable) 445 if (adapter->dcb_cfg.pfc_mode_enable)
431 adapter->hw.fc.current_mode = ixgbe_fc_pfc; 446 adapter->hw.fc.current_mode = ixgbe_fc_pfc;
432 447
433 if (adapter->dcb_set_bitmap & BIT_RESETLINK) 448 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
434 clear_bit(__IXGBE_RESETTING, &adapter->state); 449 clear_bit(__IXGBE_RESETTING, &adapter->state);
435 adapter->dcb_set_bitmap = 0x00; 450 adapter->dcb_set_bitmap = 0x00;
436 return ret; 451 return ret;
@@ -439,40 +454,38 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
439static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) 454static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
440{ 455{
441 struct ixgbe_adapter *adapter = netdev_priv(netdev); 456 struct ixgbe_adapter *adapter = netdev_priv(netdev);
442 u8 rval = 0;
443 457
444 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 458 switch (capid) {
445 switch (capid) { 459 case DCB_CAP_ATTR_PG:
446 case DCB_CAP_ATTR_PG: 460 *cap = true;
447 *cap = true; 461 break;
448 break; 462 case DCB_CAP_ATTR_PFC:
449 case DCB_CAP_ATTR_PFC: 463 *cap = true;
450 *cap = true; 464 break;
451 break; 465 case DCB_CAP_ATTR_UP2TC:
452 case DCB_CAP_ATTR_UP2TC: 466 *cap = false;
453 *cap = false; 467 break;
454 break; 468 case DCB_CAP_ATTR_PG_TCS:
455 case DCB_CAP_ATTR_PG_TCS: 469 *cap = 0x80;
456 *cap = 0x80; 470 break;
457 break; 471 case DCB_CAP_ATTR_PFC_TCS:
458 case DCB_CAP_ATTR_PFC_TCS: 472 *cap = 0x80;
459 *cap = 0x80; 473 break;
460 break; 474 case DCB_CAP_ATTR_GSP:
461 case DCB_CAP_ATTR_GSP: 475 *cap = true;
462 *cap = true; 476 break;
463 break; 477 case DCB_CAP_ATTR_BCN:
464 case DCB_CAP_ATTR_BCN: 478 *cap = false;
465 *cap = false; 479 break;
466 break; 480 case DCB_CAP_ATTR_DCBX:
467 default: 481 *cap = adapter->dcbx_cap;
468 rval = -EINVAL; 482 break;
469 break; 483 default:
470 } 484 *cap = false;
471 } else { 485 break;
472 rval = -EINVAL;
473 } 486 }
474 487
475 return rval; 488 return 0;
476} 489}
477 490
478static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) 491static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -533,21 +546,16 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
533 */ 546 */
534static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) 547static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
535{ 548{
536 u8 rval = 0; 549 struct ixgbe_adapter *adapter = netdev_priv(netdev);
550 struct dcb_app app = {
551 .selector = idtype,
552 .protocol = id,
553 };
537 554
538 switch (idtype) { 555 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
539 case DCB_APP_IDTYPE_ETHTYPE: 556 return 0;
540#ifdef IXGBE_FCOE 557
541 if (id == ETH_P_FCOE) 558 return dcb_getapp(netdev, &app);
542 rval = ixgbe_fcoe_getapp(netdev_priv(netdev));
543#endif
544 break;
545 case DCB_APP_IDTYPE_PORTNUM:
546 break;
547 default:
548 break;
549 }
550 return rval;
551} 559}
552 560
553/** 561/**
@@ -562,24 +570,45 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
562static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, 570static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
563 u8 idtype, u16 id, u8 up) 571 u8 idtype, u16 id, u8 up)
564{ 572{
573 struct ixgbe_adapter *adapter = netdev_priv(netdev);
565 u8 rval = 1; 574 u8 rval = 1;
575 struct dcb_app app = {
576 .selector = idtype,
577 .protocol = id,
578 .priority = up
579 };
580
581 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
582 return rval;
583
584 rval = dcb_setapp(netdev, &app);
566 585
567 switch (idtype) { 586 switch (idtype) {
568 case DCB_APP_IDTYPE_ETHTYPE: 587 case DCB_APP_IDTYPE_ETHTYPE:
569#ifdef IXGBE_FCOE 588#ifdef IXGBE_FCOE
570 if (id == ETH_P_FCOE) { 589 if (id == ETH_P_FCOE) {
571 u8 tc; 590 u8 old_tc;
572 struct ixgbe_adapter *adapter;
573 591
574 adapter = netdev_priv(netdev); 592 /* Get current programmed tc */
575 tc = adapter->fcoe.tc; 593 old_tc = adapter->fcoe.tc;
576 rval = ixgbe_fcoe_setapp(adapter, up); 594 rval = ixgbe_fcoe_setapp(adapter, up);
577 if ((!rval) && (tc != adapter->fcoe.tc) && 595
578 (adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 596 if (rval ||
579 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { 597 !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
598 !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
599 break;
600
601 /* The FCoE application priority may be changed multiple
602 * times in quick sucession with switches that build up
603 * TLVs. To avoid creating uneeded device resets this
604 * checks the actual HW configuration and clears
605 * BIT_APP_UPCHG if a HW configuration change is not
606 * need
607 */
608 if (old_tc == adapter->fcoe.tc)
609 adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
610 else
580 adapter->dcb_set_bitmap |= BIT_APP_UPCHG; 611 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
581 adapter->dcb_set_bitmap |= BIT_RESETLINK;
582 }
583 } 612 }
584#endif 613#endif
585 break; 614 break;
@@ -591,7 +620,204 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
591 return rval; 620 return rval;
592} 621}
593 622
623static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
624 struct ieee_ets *ets)
625{
626 struct ixgbe_adapter *adapter = netdev_priv(dev);
627 struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
628
629 /* No IEEE PFC settings available */
630 if (!my_ets)
631 return -EINVAL;
632
633 ets->ets_cap = MAX_TRAFFIC_CLASS;
634 ets->cbs = my_ets->cbs;
635 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
636 memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
637 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
638 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
639 return 0;
640}
641
642static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
643 struct ieee_ets *ets)
644{
645 struct ixgbe_adapter *adapter = netdev_priv(dev);
646 __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
647 __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
648 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
649 int i, err;
650 __u64 *p = (__u64 *) ets->prio_tc;
651 /* naively give each TC a bwg to map onto CEE hardware */
652 __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
653
654 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
655 return -EINVAL;
656
657 if (!adapter->ixgbe_ieee_ets) {
658 adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
659 GFP_KERNEL);
660 if (!adapter->ixgbe_ieee_ets)
661 return -ENOMEM;
662 }
663
664 memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
665
666 /* Map TSA onto CEE prio type */
667 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
668 switch (ets->tc_tsa[i]) {
669 case IEEE_8021QAZ_TSA_STRICT:
670 prio_type[i] = 2;
671 break;
672 case IEEE_8021QAZ_TSA_ETS:
673 prio_type[i] = 0;
674 break;
675 default:
676 /* Hardware only supports priority strict or
677 * ETS transmission selection algorithms if
678 * we receive some other value from dcbnl
679 * throw an error
680 */
681 return -EINVAL;
682 }
683 }
684
685 if (*p)
686 ixgbe_dcbnl_set_state(dev, 1);
687 else
688 ixgbe_dcbnl_set_state(dev, 0);
689
690 ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
691 err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
692 bwg_id, prio_type, ets->prio_tc);
693 return err;
694}
695
696static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
697 struct ieee_pfc *pfc)
698{
699 struct ixgbe_adapter *adapter = netdev_priv(dev);
700 struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
701 int i;
702
703 /* No IEEE PFC settings available */
704 if (!my_pfc)
705 return -EINVAL;
706
707 pfc->pfc_cap = MAX_TRAFFIC_CLASS;
708 pfc->pfc_en = my_pfc->pfc_en;
709 pfc->mbc = my_pfc->mbc;
710 pfc->delay = my_pfc->delay;
711
712 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
713 pfc->requests[i] = adapter->stats.pxoffrxc[i];
714 pfc->indications[i] = adapter->stats.pxofftxc[i];
715 }
716
717 return 0;
718}
719
720static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
721 struct ieee_pfc *pfc)
722{
723 struct ixgbe_adapter *adapter = netdev_priv(dev);
724 int err;
725
726 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
727 return -EINVAL;
728
729 if (!adapter->ixgbe_ieee_pfc) {
730 adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
731 GFP_KERNEL);
732 if (!adapter->ixgbe_ieee_pfc)
733 return -ENOMEM;
734 }
735
736 memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
737 err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
738 return err;
739}
740
741static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
742 struct dcb_app *app)
743{
744 struct ixgbe_adapter *adapter = netdev_priv(dev);
745
746 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
747 return -EINVAL;
748#ifdef IXGBE_FCOE
749 if (app->selector == 1 && app->protocol == ETH_P_FCOE) {
750 if (adapter->fcoe.tc == app->priority)
751 goto setapp;
752
753 /* In IEEE mode map up to tc 1:1 */
754 adapter->fcoe.tc = app->priority;
755 adapter->fcoe.up = app->priority;
756
757 /* Force hardware reset required to push FCoE
758 * setup on {tx|rx}_rings
759 */
760 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
761 ixgbe_dcbnl_set_all(dev);
762 }
763
764setapp:
765#endif
766 dcb_setapp(dev, app);
767 return 0;
768}
769
770static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
771{
772 struct ixgbe_adapter *adapter = netdev_priv(dev);
773 return adapter->dcbx_cap;
774}
775
776static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
777{
778 struct ixgbe_adapter *adapter = netdev_priv(dev);
779 struct ieee_ets ets = {0};
780 struct ieee_pfc pfc = {0};
781
782 /* no support for LLD_MANAGED modes or CEE+IEEE */
783 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
784 ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
785 !(mode & DCB_CAP_DCBX_HOST))
786 return 1;
787
788 if (mode == adapter->dcbx_cap)
789 return 0;
790
791 adapter->dcbx_cap = mode;
792
793 /* ETS and PFC defaults */
794 ets.ets_cap = 8;
795 pfc.pfc_cap = 8;
796
797 if (mode & DCB_CAP_DCBX_VER_IEEE) {
798 ixgbe_dcbnl_ieee_setets(dev, &ets);
799 ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
800 } else if (mode & DCB_CAP_DCBX_VER_CEE) {
801 adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
802 ixgbe_dcbnl_set_all(dev);
803 } else {
804 /* Drop into single TC mode strict priority as this
805 * indicates CEE and IEEE versions are disabled
806 */
807 ixgbe_dcbnl_ieee_setets(dev, &ets);
808 ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
809 ixgbe_dcbnl_set_state(dev, 0);
810 }
811
812 return 0;
813}
814
594const struct dcbnl_rtnl_ops dcbnl_ops = { 815const struct dcbnl_rtnl_ops dcbnl_ops = {
816 .ieee_getets = ixgbe_dcbnl_ieee_getets,
817 .ieee_setets = ixgbe_dcbnl_ieee_setets,
818 .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
819 .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
820 .ieee_setapp = ixgbe_dcbnl_ieee_setapp,
595 .getstate = ixgbe_dcbnl_get_state, 821 .getstate = ixgbe_dcbnl_get_state,
596 .setstate = ixgbe_dcbnl_set_state, 822 .setstate = ixgbe_dcbnl_set_state,
597 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, 823 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
@@ -613,5 +839,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
613 .setpfcstate = ixgbe_dcbnl_setpfcstate, 839 .setpfcstate = ixgbe_dcbnl_setpfcstate,
614 .getapp = ixgbe_dcbnl_getapp, 840 .getapp = ixgbe_dcbnl_getapp,
615 .setapp = ixgbe_dcbnl_setapp, 841 .setapp = ixgbe_dcbnl_setapp,
842 .getdcbx = ixgbe_dcbnl_getdcbx,
843 .setdcbx = ixgbe_dcbnl_setdcbx,
616}; 844};
617
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2002ea88ca2a..76380a2b35aa 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -152,20 +152,35 @@ static int ixgbe_get_settings(struct net_device *netdev,
152 ecmd->supported |= (SUPPORTED_1000baseT_Full | 152 ecmd->supported |= (SUPPORTED_1000baseT_Full |
153 SUPPORTED_Autoneg); 153 SUPPORTED_Autoneg);
154 154
155 switch (hw->mac.type) {
156 case ixgbe_mac_X540:
157 ecmd->supported |= SUPPORTED_100baseT_Full;
158 break;
159 default:
160 break;
161 }
162
155 ecmd->advertising = ADVERTISED_Autoneg; 163 ecmd->advertising = ADVERTISED_Autoneg;
156 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 164 if (hw->phy.autoneg_advertised) {
157 ecmd->advertising |= ADVERTISED_10000baseT_Full; 165 if (hw->phy.autoneg_advertised &
158 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) 166 IXGBE_LINK_SPEED_100_FULL)
159 ecmd->advertising |= ADVERTISED_1000baseT_Full; 167 ecmd->advertising |= ADVERTISED_100baseT_Full;
160 /* 168 if (hw->phy.autoneg_advertised &
161 * It's possible that phy.autoneg_advertised may not be 169 IXGBE_LINK_SPEED_10GB_FULL)
162 * set yet. If so display what the default would be - 170 ecmd->advertising |= ADVERTISED_10000baseT_Full;
163 * both 1G and 10G supported. 171 if (hw->phy.autoneg_advertised &
164 */ 172 IXGBE_LINK_SPEED_1GB_FULL)
165 if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full | 173 ecmd->advertising |= ADVERTISED_1000baseT_Full;
166 ADVERTISED_10000baseT_Full))) 174 } else {
175 /*
176 * Default advertised modes in case
177 * phy.autoneg_advertised isn't set.
178 */
167 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 179 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
168 ADVERTISED_1000baseT_Full); 180 ADVERTISED_1000baseT_Full);
181 if (hw->mac.type == ixgbe_mac_X540)
182 ecmd->advertising |= ADVERTISED_100baseT_Full;
183 }
169 184
170 if (hw->phy.media_type == ixgbe_media_type_copper) { 185 if (hw->phy.media_type == ixgbe_media_type_copper) {
171 ecmd->supported |= SUPPORTED_TP; 186 ecmd->supported |= SUPPORTED_TP;
@@ -271,8 +286,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
271 286
272 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 287 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
273 if (link_up) { 288 if (link_up) {
274 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 289 switch (link_speed) {
275 SPEED_10000 : SPEED_1000; 290 case IXGBE_LINK_SPEED_10GB_FULL:
291 ecmd->speed = SPEED_10000;
292 break;
293 case IXGBE_LINK_SPEED_1GB_FULL:
294 ecmd->speed = SPEED_1000;
295 break;
296 case IXGBE_LINK_SPEED_100_FULL:
297 ecmd->speed = SPEED_100;
298 break;
299 default:
300 break;
301 }
276 ecmd->duplex = DUPLEX_FULL; 302 ecmd->duplex = DUPLEX_FULL;
277 } else { 303 } else {
278 ecmd->speed = -1; 304 ecmd->speed = -1;
@@ -306,6 +332,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
306 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 332 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
307 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 333 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
308 334
335 if (ecmd->advertising & ADVERTISED_100baseT_Full)
336 advertised |= IXGBE_LINK_SPEED_100_FULL;
337
309 if (old == advertised) 338 if (old == advertised)
310 return err; 339 return err;
311 /* this sets the link speed and restarts auto-neg */ 340 /* this sets the link speed and restarts auto-neg */
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index c54a88274d51..dba7d77588ef 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -135,22 +135,19 @@ out_ddp_put:
135 return len; 135 return len;
136} 136}
137 137
138
138/** 139/**
139 * ixgbe_fcoe_ddp_get - called to set up ddp context 140 * ixgbe_fcoe_ddp_setup - called to set up ddp context
140 * @netdev: the corresponding net_device 141 * @netdev: the corresponding net_device
141 * @xid: the exchange id requesting ddp 142 * @xid: the exchange id requesting ddp
142 * @sgl: the scatter-gather list for this request 143 * @sgl: the scatter-gather list for this request
143 * @sgc: the number of scatter-gather items 144 * @sgc: the number of scatter-gather items
144 * 145 *
145 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
146 * and is expected to be called from ULD, e.g., FCP layer of libfc
147 * to set up ddp for the corresponding xid of the given sglist for
148 * the corresponding I/O.
149 *
150 * Returns : 1 for success and 0 for no ddp 146 * Returns : 1 for success and 0 for no ddp
151 */ 147 */
152int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 148static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
153 struct scatterlist *sgl, unsigned int sgc) 149 struct scatterlist *sgl, unsigned int sgc,
150 int target_mode)
154{ 151{
155 struct ixgbe_adapter *adapter; 152 struct ixgbe_adapter *adapter;
156 struct ixgbe_hw *hw; 153 struct ixgbe_hw *hw;
@@ -164,7 +161,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
164 unsigned int lastsize; 161 unsigned int lastsize;
165 unsigned int thisoff = 0; 162 unsigned int thisoff = 0;
166 unsigned int thislen = 0; 163 unsigned int thislen = 0;
167 u32 fcbuff, fcdmarw, fcfltrw; 164 u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
168 dma_addr_t addr = 0; 165 dma_addr_t addr = 0;
169 166
170 if (!netdev || !sgl) 167 if (!netdev || !sgl)
@@ -275,6 +272,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
275 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 272 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
276 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 273 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
277 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 274 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
275 /* Set WRCONTX bit to allow DDP for target */
276 if (target_mode)
277 fcbuff |= (IXGBE_FCBUFF_WRCONTX);
278 fcbuff |= (IXGBE_FCBUFF_VALID); 278 fcbuff |= (IXGBE_FCBUFF_VALID);
279 279
280 fcdmarw = xid; 280 fcdmarw = xid;
@@ -287,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
287 /* program DMA context */ 287 /* program DMA context */
288 hw = &adapter->hw; 288 hw = &adapter->hw;
289 spin_lock_bh(&fcoe->lock); 289 spin_lock_bh(&fcoe->lock);
290
291 /* turn on last frame indication for target mode as FCP_RSPtarget is
292 * supposed to send FCP_RSP when it is done. */
293 if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
294 set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
295 fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
296 fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
297 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
298 }
299
290 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); 300 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
291 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); 301 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
292 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); 302 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
@@ -295,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
295 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); 305 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
296 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); 306 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
297 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); 307 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
308
298 spin_unlock_bh(&fcoe->lock); 309 spin_unlock_bh(&fcoe->lock);
299 310
300 return 1; 311 return 1;
@@ -309,6 +320,47 @@ out_noddp_unmap:
309} 320}
310 321
311/** 322/**
323 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
324 * @netdev: the corresponding net_device
325 * @xid: the exchange id requesting ddp
326 * @sgl: the scatter-gather list for this request
327 * @sgc: the number of scatter-gather items
328 *
329 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
330 * and is expected to be called from ULD, e.g., FCP layer of libfc
331 * to set up ddp for the corresponding xid of the given sglist for
332 * the corresponding I/O.
333 *
334 * Returns : 1 for success and 0 for no ddp
335 */
336int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
337 struct scatterlist *sgl, unsigned int sgc)
338{
339 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
340}
341
342/**
343 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
344 * @netdev: the corresponding net_device
345 * @xid: the exchange id requesting ddp
346 * @sgl: the scatter-gather list for this request
347 * @sgc: the number of scatter-gather items
348 *
349 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
350 * and is expected to be called from ULD, e.g., FCP layer of libfc
351 * to set up ddp for the corresponding xid of the given sglist for
352 * the corresponding I/O. The DDP in target mode is a write I/O request
353 * from the initiator.
354 *
355 * Returns : 1 for success and 0 for no ddp
356 */
357int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
358 struct scatterlist *sgl, unsigned int sgc)
359{
360 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
361}
362
363/**
312 * ixgbe_fcoe_ddp - check ddp status and mark it done 364 * ixgbe_fcoe_ddp - check ddp status and mark it done
313 * @adapter: ixgbe adapter 365 * @adapter: ixgbe adapter
314 * @rx_desc: advanced rx descriptor 366 * @rx_desc: advanced rx descriptor
@@ -331,6 +383,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
331 struct ixgbe_fcoe *fcoe; 383 struct ixgbe_fcoe *fcoe;
332 struct ixgbe_fcoe_ddp *ddp; 384 struct ixgbe_fcoe_ddp *ddp;
333 struct fc_frame_header *fh; 385 struct fc_frame_header *fh;
386 struct fcoe_crc_eof *crc;
334 387
335 if (!ixgbe_rx_is_fcoe(rx_desc)) 388 if (!ixgbe_rx_is_fcoe(rx_desc))
336 goto ddp_out; 389 goto ddp_out;
@@ -384,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
384 else if (ddp->len) 437 else if (ddp->len)
385 rc = ddp->len; 438 rc = ddp->len;
386 } 439 }
387 440 /* In target mode, check the last data frame of the sequence.
441 * For DDP in target mode, data is already DDPed but the header
442 * indication of the last data frame ould allow is to tell if we
443 * got all the data and the ULP can send FCP_RSP back, as this is
444 * not a full fcoe frame, we fill the trailer here so it won't be
445 * dropped by the ULP stack.
446 */
447 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
448 (fctl & FC_FC_END_SEQ)) {
449 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
450 crc->fcoe_eof = FC_EOF_T;
451 }
388ddp_out: 452ddp_out:
389 return rc; 453 return rc;
390} 454}
@@ -749,21 +813,6 @@ out_disable:
749 813
750#ifdef CONFIG_IXGBE_DCB 814#ifdef CONFIG_IXGBE_DCB
751/** 815/**
752 * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
753 * @adapter : ixgbe adapter
754 *
755 * Finds out the corresponding user priority bitmap from the current
756 * traffic class that FCoE belongs to. Returns 0 as the invalid user
757 * priority bitmap to indicate an error.
758 *
759 * Returns : 802.1p user priority bitmap for FCoE
760 */
761u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
762{
763 return 1 << adapter->fcoe.up;
764}
765
766/**
767 * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE 816 * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
768 * @adapter : ixgbe adapter 817 * @adapter : ixgbe adapter
769 * @up : 802.1p user priority bitmap 818 * @up : 802.1p user priority bitmap
@@ -840,5 +889,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
840 } 889 }
841 return rc; 890 return rc;
842} 891}
843
844
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 65cc8fb14fe7..5a650a4ace66 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,9 @@
52/* fcerr */ 52/* fcerr */
53#define IXGBE_FCERR_BADCRC 0x00100000 53#define IXGBE_FCERR_BADCRC 0x00100000
54 54
55/* FCoE DDP for target mode */
56#define __IXGBE_FCOE_TARGET 1
57
55struct ixgbe_fcoe_ddp { 58struct ixgbe_fcoe_ddp {
56 int len; 59 int len;
57 u32 err; 60 u32 err;
@@ -66,6 +69,7 @@ struct ixgbe_fcoe {
66 u8 tc; 69 u8 tc;
67 u8 up; 70 u8 up;
68#endif 71#endif
72 unsigned long mode;
69 atomic_t refcnt; 73 atomic_t refcnt;
70 spinlock_t lock; 74 spinlock_t lock;
71 struct pci_pool *pool; 75 struct pci_pool *pool;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 30f9ccfb4f87..f17e4a7ee731 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -54,7 +54,8 @@ static const char ixgbe_driver_string[] =
54 54
55#define DRV_VERSION "3.2.9-k2" 55#define DRV_VERSION "3.2.9-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static const char ixgbe_copyright[] =
58 "Copyright (c) 1999-2011 Intel Corporation.";
58 59
59static const struct ixgbe_info *ixgbe_info_tbl[] = { 60static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info, 61 [board_82598] = &ixgbe_82598_info,
@@ -648,10 +649,10 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
648 * 649 *
649 * Returns : a tc index for use in range 0-7, or 0-3 650 * Returns : a tc index for use in range 0-7, or 0-3
650 */ 651 */
651u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) 652static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
652{ 653{
653 int tc = -1; 654 int tc = -1;
654 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 655 int dcb_i = netdev_get_num_tc(adapter->netdev);
655 656
656 /* if DCB is not enabled the queues have no TC */ 657 /* if DCB is not enabled the queues have no TC */
657 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 658 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
@@ -2597,6 +2598,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2597 2598
2598 i--; 2599 i--;
2599 for (; i >= 0; i--) { 2600 for (; i >= 0; i--) {
2601 /* free only the irqs that were actually requested */
2602 if (!adapter->q_vector[i]->rxr_count &&
2603 !adapter->q_vector[i]->txr_count)
2604 continue;
2605
2600 free_irq(adapter->msix_entries[i].vector, 2606 free_irq(adapter->msix_entries[i].vector,
2601 adapter->q_vector[i]); 2607 adapter->q_vector[i]);
2602 } 2608 }
@@ -2886,17 +2892,20 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2886 ); 2892 );
2887 2893
2888 switch (mask) { 2894 switch (mask) {
2895#ifdef CONFIG_IXGBE_DCB
2896 case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
2897 mrqc = IXGBE_MRQC_RTRSS8TCEN;
2898 break;
2899 case (IXGBE_FLAG_DCB_ENABLED):
2900 mrqc = IXGBE_MRQC_RT8TCEN;
2901 break;
2902#endif /* CONFIG_IXGBE_DCB */
2889 case (IXGBE_FLAG_RSS_ENABLED): 2903 case (IXGBE_FLAG_RSS_ENABLED):
2890 mrqc = IXGBE_MRQC_RSSEN; 2904 mrqc = IXGBE_MRQC_RSSEN;
2891 break; 2905 break;
2892 case (IXGBE_FLAG_SRIOV_ENABLED): 2906 case (IXGBE_FLAG_SRIOV_ENABLED):
2893 mrqc = IXGBE_MRQC_VMDQEN; 2907 mrqc = IXGBE_MRQC_VMDQEN;
2894 break; 2908 break;
2895#ifdef CONFIG_IXGBE_DCB
2896 case (IXGBE_FLAG_DCB_ENABLED):
2897 mrqc = IXGBE_MRQC_RT8TCEN;
2898 break;
2899#endif /* CONFIG_IXGBE_DCB */
2900 default: 2909 default:
2901 break; 2910 break;
2902 } 2911 }
@@ -3077,6 +3086,14 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3077 ixgbe_configure_srrctl(adapter, ring); 3086 ixgbe_configure_srrctl(adapter, ring);
3078 ixgbe_configure_rscctl(adapter, ring); 3087 ixgbe_configure_rscctl(adapter, ring);
3079 3088
3089 /* If operating in IOV mode set RLPML for X540 */
3090 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3091 hw->mac.type == ixgbe_mac_X540) {
3092 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
3093 rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
3094 ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
3095 }
3096
3080 if (hw->mac.type == ixgbe_mac_82598EB) { 3097 if (hw->mac.type == ixgbe_mac_82598EB) {
3081 /* 3098 /*
3082 * enable cache line friendly hardware writes: 3099 * enable cache line friendly hardware writes:
@@ -3641,15 +3658,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3641 if (hw->mac.type == ixgbe_mac_82598EB) 3658 if (hw->mac.type == ixgbe_mac_82598EB)
3642 netif_set_gso_max_size(adapter->netdev, 32768); 3659 netif_set_gso_max_size(adapter->netdev, 32768);
3643 3660
3644#ifdef CONFIG_FCOE
3645 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3646 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3647#endif
3648
3649 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3650 DCB_TX_CONFIG);
3651 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3652 DCB_RX_CONFIG);
3653 3661
3654 /* Enable VLAN tag insert/strip */ 3662 /* Enable VLAN tag insert/strip */
3655 adapter->netdev->features |= NETIF_F_HW_VLAN_RX; 3663 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
@@ -3657,7 +3665,43 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3657 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3665 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3658 3666
3659 /* reconfigure the hardware */ 3667 /* reconfigure the hardware */
3660 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); 3668 if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
3669#ifdef CONFIG_FCOE
3670 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3671 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3672#endif
3673 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3674 DCB_TX_CONFIG);
3675 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3676 DCB_RX_CONFIG);
3677 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3678 } else {
3679 struct net_device *dev = adapter->netdev;
3680
3681 if (adapter->ixgbe_ieee_ets)
3682 dev->dcbnl_ops->ieee_setets(dev,
3683 adapter->ixgbe_ieee_ets);
3684 if (adapter->ixgbe_ieee_pfc)
3685 dev->dcbnl_ops->ieee_setpfc(dev,
3686 adapter->ixgbe_ieee_pfc);
3687 }
3688
3689 /* Enable RSS Hash per TC */
3690 if (hw->mac.type != ixgbe_mac_82598EB) {
3691 int i;
3692 u32 reg = 0;
3693
3694 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
3695 u8 msb = 0;
3696 u8 cnt = adapter->netdev->tc_to_txq[i].count;
3697
3698 while (cnt >>= 1)
3699 msb++;
3700
3701 reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
3702 }
3703 IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
3704 }
3661} 3705}
3662 3706
3663#endif 3707#endif
@@ -3761,7 +3805,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3761 if (ret) 3805 if (ret)
3762 goto link_cfg_out; 3806 goto link_cfg_out;
3763 3807
3764 if (hw->mac.ops.get_link_capabilities) 3808 autoneg = hw->phy.autoneg_advertised;
3809 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3765 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, 3810 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3766 &negotiation); 3811 &negotiation);
3767 if (ret) 3812 if (ret)
@@ -3876,7 +3921,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3876 * If we're not hot-pluggable SFP+, we just need to configure link 3921 * If we're not hot-pluggable SFP+, we just need to configure link
3877 * and bring it up. 3922 * and bring it up.
3878 */ 3923 */
3879 if (hw->phy.type == ixgbe_phy_unknown) 3924 if (hw->phy.type == ixgbe_phy_none)
3880 schedule_work(&adapter->sfp_config_module_task); 3925 schedule_work(&adapter->sfp_config_module_task);
3881 3926
3882 /* enable transmits */ 3927 /* enable transmits */
@@ -4243,24 +4288,6 @@ static void ixgbe_reset_task(struct work_struct *work)
4243 ixgbe_reinit_locked(adapter); 4288 ixgbe_reinit_locked(adapter);
4244} 4289}
4245 4290
4246#ifdef CONFIG_IXGBE_DCB
4247static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
4248{
4249 bool ret = false;
4250 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
4251
4252 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4253 return ret;
4254
4255 f->mask = 0x7 << 3;
4256 adapter->num_rx_queues = f->indices;
4257 adapter->num_tx_queues = f->indices;
4258 ret = true;
4259
4260 return ret;
4261}
4262#endif
4263
4264/** 4291/**
4265 * ixgbe_set_rss_queues: Allocate queues for RSS 4292 * ixgbe_set_rss_queues: Allocate queues for RSS
4266 * @adapter: board private structure to initialize 4293 * @adapter: board private structure to initialize
@@ -4331,19 +4358,26 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
4331 **/ 4358 **/
4332static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) 4359static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4333{ 4360{
4334 bool ret = false;
4335 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 4361 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4336 4362
4337 f->indices = min((int)num_online_cpus(), f->indices); 4363 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4338 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 4364 return false;
4339 adapter->num_rx_queues = 1; 4365
4340 adapter->num_tx_queues = 1; 4366 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4341#ifdef CONFIG_IXGBE_DCB 4367#ifdef CONFIG_IXGBE_DCB
4342 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4368 int tc;
4343 e_info(probe, "FCoE enabled with DCB\n"); 4369 struct net_device *dev = adapter->netdev;
4344 ixgbe_set_dcb_queues(adapter); 4370
4345 } 4371 tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
4372 f->indices = dev->tc_to_txq[tc].count;
4373 f->mask = dev->tc_to_txq[tc].offset;
4346#endif 4374#endif
4375 } else {
4376 f->indices = min((int)num_online_cpus(), f->indices);
4377
4378 adapter->num_rx_queues = 1;
4379 adapter->num_tx_queues = 1;
4380
4347 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4381 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4348 e_info(probe, "FCoE enabled with RSS\n"); 4382 e_info(probe, "FCoE enabled with RSS\n");
4349 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4383 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
@@ -4356,14 +4390,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4356 f->mask = adapter->num_rx_queues; 4390 f->mask = adapter->num_rx_queues;
4357 adapter->num_rx_queues += f->indices; 4391 adapter->num_rx_queues += f->indices;
4358 adapter->num_tx_queues += f->indices; 4392 adapter->num_tx_queues += f->indices;
4393 }
4359 4394
4360 ret = true; 4395 return true;
4396}
4397#endif /* IXGBE_FCOE */
4398
4399#ifdef CONFIG_IXGBE_DCB
4400static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
4401{
4402 bool ret = false;
4403 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
4404 int i, q;
4405
4406 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4407 return ret;
4408
4409 f->indices = 0;
4410 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
4411 q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
4412 f->indices += q;
4361 } 4413 }
4362 4414
4415 f->mask = 0x7 << 3;
4416 adapter->num_rx_queues = f->indices;
4417 adapter->num_tx_queues = f->indices;
4418 ret = true;
4419
4420#ifdef IXGBE_FCOE
4421 /* FCoE enabled queues require special configuration done through
4422 * configure_fcoe() and others. Here we map FCoE indices onto the
4423 * DCB queue pairs allowing FCoE to own configuration later.
4424 */
4425 ixgbe_set_fcoe_queues(adapter);
4426#endif
4427
4363 return ret; 4428 return ret;
4364} 4429}
4430#endif
4365 4431
4366#endif /* IXGBE_FCOE */
4367/** 4432/**
4368 * ixgbe_set_sriov_queues: Allocate queues for IOV use 4433 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4369 * @adapter: board private structure to initialize 4434 * @adapter: board private structure to initialize
@@ -4399,16 +4464,16 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4399 if (ixgbe_set_sriov_queues(adapter)) 4464 if (ixgbe_set_sriov_queues(adapter))
4400 goto done; 4465 goto done;
4401 4466
4402#ifdef IXGBE_FCOE
4403 if (ixgbe_set_fcoe_queues(adapter))
4404 goto done;
4405
4406#endif /* IXGBE_FCOE */
4407#ifdef CONFIG_IXGBE_DCB 4467#ifdef CONFIG_IXGBE_DCB
4408 if (ixgbe_set_dcb_queues(adapter)) 4468 if (ixgbe_set_dcb_queues(adapter))
4409 goto done; 4469 goto done;
4410 4470
4411#endif 4471#endif
4472#ifdef IXGBE_FCOE
4473 if (ixgbe_set_fcoe_queues(adapter))
4474 goto done;
4475
4476#endif /* IXGBE_FCOE */
4412 if (ixgbe_set_fdir_queues(adapter)) 4477 if (ixgbe_set_fdir_queues(adapter))
4413 goto done; 4478 goto done;
4414 4479
@@ -4500,6 +4565,110 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4500} 4565}
4501 4566
4502#ifdef CONFIG_IXGBE_DCB 4567#ifdef CONFIG_IXGBE_DCB
4568
4569/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
4570void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
4571 unsigned int *tx, unsigned int *rx)
4572{
4573 struct net_device *dev = adapter->netdev;
4574 struct ixgbe_hw *hw = &adapter->hw;
4575 u8 num_tcs = netdev_get_num_tc(dev);
4576
4577 *tx = 0;
4578 *rx = 0;
4579
4580 switch (hw->mac.type) {
4581 case ixgbe_mac_82598EB:
4582 *tx = tc << 3;
4583 *rx = tc << 2;
4584 break;
4585 case ixgbe_mac_82599EB:
4586 case ixgbe_mac_X540:
4587 if (num_tcs == 8) {
4588 if (tc < 3) {
4589 *tx = tc << 5;
4590 *rx = tc << 4;
4591 } else if (tc < 5) {
4592 *tx = ((tc + 2) << 4);
4593 *rx = tc << 4;
4594 } else if (tc < num_tcs) {
4595 *tx = ((tc + 8) << 3);
4596 *rx = tc << 4;
4597 }
4598 } else if (num_tcs == 4) {
4599 *rx = tc << 5;
4600 switch (tc) {
4601 case 0:
4602 *tx = 0;
4603 break;
4604 case 1:
4605 *tx = 64;
4606 break;
4607 case 2:
4608 *tx = 96;
4609 break;
4610 case 3:
4611 *tx = 112;
4612 break;
4613 default:
4614 break;
4615 }
4616 }
4617 break;
4618 default:
4619 break;
4620 }
4621}
4622
4623#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
4624
4625/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
4626 * classes.
4627 *
4628 * @netdev: net device to configure
4629 * @tc: number of traffic classes to enable
4630 */
4631int ixgbe_setup_tc(struct net_device *dev, u8 tc)
4632{
4633 int i;
4634 unsigned int q, offset = 0;
4635
4636 if (!tc) {
4637 netdev_reset_tc(dev);
4638 } else {
4639 struct ixgbe_adapter *adapter = netdev_priv(dev);
4640
4641 /* Hardware supports up to 8 traffic classes */
4642 if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
4643 return -EINVAL;
4644
4645 /* Partition Tx queues evenly amongst traffic classes */
4646 for (i = 0; i < tc; i++) {
4647 q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
4648 netdev_set_prio_tc_map(dev, i, i);
4649 netdev_set_tc_queue(dev, i, q, offset);
4650 offset += q;
4651 }
4652
4653 /* This enables multiple traffic class support in the hardware
4654 * which defaults to strict priority transmission by default.
4655 * If traffic classes are already enabled perhaps through DCB
4656 * code path then existing configuration will be used.
4657 */
4658 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
4659 dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
4660 struct ieee_ets ets = {
4661 .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
4662 };
4663 u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4664
4665 dev->dcbnl_ops->setdcbx(dev, mode);
4666 dev->dcbnl_ops->ieee_setets(dev, &ets);
4667 }
4668 }
4669 return 0;
4670}
4671
4503/** 4672/**
4504 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB 4673 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4505 * @adapter: board private structure to initialize 4674 * @adapter: board private structure to initialize
@@ -4509,72 +4678,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4509 **/ 4678 **/
4510static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) 4679static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4511{ 4680{
4512 int i; 4681 struct net_device *dev = adapter->netdev;
4513 bool ret = false; 4682 int i, j, k;
4514 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 4683 u8 num_tcs = netdev_get_num_tc(dev);
4515 4684
4516 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 4685 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4517 return false; 4686 return false;
4518 4687
4519 /* the number of queues is assumed to be symmetric */ 4688 for (i = 0, k = 0; i < num_tcs; i++) {
4520 switch (adapter->hw.mac.type) { 4689 unsigned int tx_s, rx_s;
4521 case ixgbe_mac_82598EB: 4690 u16 count = dev->tc_to_txq[i].count;
4522 for (i = 0; i < dcb_i; i++) { 4691
4523 adapter->rx_ring[i]->reg_idx = i << 3; 4692 ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
4524 adapter->tx_ring[i]->reg_idx = i << 2; 4693 for (j = 0; j < count; j++, k++) {
4525 } 4694 adapter->tx_ring[k]->reg_idx = tx_s + j;
4526 ret = true; 4695 adapter->rx_ring[k]->reg_idx = rx_s + j;
4527 break; 4696 adapter->tx_ring[k]->dcb_tc = i;
4528 case ixgbe_mac_82599EB: 4697 adapter->rx_ring[k]->dcb_tc = i;
4529 case ixgbe_mac_X540:
4530 if (dcb_i == 8) {
4531 /*
4532 * Tx TC0 starts at: descriptor queue 0
4533 * Tx TC1 starts at: descriptor queue 32
4534 * Tx TC2 starts at: descriptor queue 64
4535 * Tx TC3 starts at: descriptor queue 80
4536 * Tx TC4 starts at: descriptor queue 96
4537 * Tx TC5 starts at: descriptor queue 104
4538 * Tx TC6 starts at: descriptor queue 112
4539 * Tx TC7 starts at: descriptor queue 120
4540 *
4541 * Rx TC0-TC7 are offset by 16 queues each
4542 */
4543 for (i = 0; i < 3; i++) {
4544 adapter->tx_ring[i]->reg_idx = i << 5;
4545 adapter->rx_ring[i]->reg_idx = i << 4;
4546 }
4547 for ( ; i < 5; i++) {
4548 adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
4549 adapter->rx_ring[i]->reg_idx = i << 4;
4550 }
4551 for ( ; i < dcb_i; i++) {
4552 adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
4553 adapter->rx_ring[i]->reg_idx = i << 4;
4554 }
4555 ret = true;
4556 } else if (dcb_i == 4) {
4557 /*
4558 * Tx TC0 starts at: descriptor queue 0
4559 * Tx TC1 starts at: descriptor queue 64
4560 * Tx TC2 starts at: descriptor queue 96
4561 * Tx TC3 starts at: descriptor queue 112
4562 *
4563 * Rx TC0-TC3 are offset by 32 queues each
4564 */
4565 adapter->tx_ring[0]->reg_idx = 0;
4566 adapter->tx_ring[1]->reg_idx = 64;
4567 adapter->tx_ring[2]->reg_idx = 96;
4568 adapter->tx_ring[3]->reg_idx = 112;
4569 for (i = 0 ; i < dcb_i; i++)
4570 adapter->rx_ring[i]->reg_idx = i << 5;
4571 ret = true;
4572 } 4698 }
4573 break;
4574 default:
4575 break;
4576 } 4699 }
4577 return ret; 4700
4701 return true;
4578} 4702}
4579#endif 4703#endif
4580 4704
@@ -4620,33 +4744,6 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4620 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 4744 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4621 return false; 4745 return false;
4622 4746
4623#ifdef CONFIG_IXGBE_DCB
4624 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4625 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4626
4627 ixgbe_cache_ring_dcb(adapter);
4628 /* find out queues in TC for FCoE */
4629 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4630 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4631 /*
4632 * In 82599, the number of Tx queues for each traffic
4633 * class for both 8-TC and 4-TC modes are:
4634 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4635 * 8 TCs: 32 32 16 16 8 8 8 8
4636 * 4 TCs: 64 64 32 32
4637 * We have max 8 queues for FCoE, where 8 the is
4638 * FCoE redirection table size. If TC for FCoE is
4639 * less than or equal to TC3, we have enough queues
4640 * to add max of 8 queues for FCoE, so we start FCoE
4641 * Tx queue from the next one, i.e., reg_idx + 1.
4642 * If TC for FCoE is above TC3, implying 8 TC mode,
4643 * and we need 8 for FCoE, we have to take all queues
4644 * in that traffic class for FCoE.
4645 */
4646 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4647 fcoe_tx_i--;
4648 }
4649#endif /* CONFIG_IXGBE_DCB */
4650 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4747 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4651 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4748 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4652 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 4749 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
@@ -4703,16 +4800,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4703 if (ixgbe_cache_ring_sriov(adapter)) 4800 if (ixgbe_cache_ring_sriov(adapter))
4704 return; 4801 return;
4705 4802
4803#ifdef CONFIG_IXGBE_DCB
4804 if (ixgbe_cache_ring_dcb(adapter))
4805 return;
4806#endif
4807
4706#ifdef IXGBE_FCOE 4808#ifdef IXGBE_FCOE
4707 if (ixgbe_cache_ring_fcoe(adapter)) 4809 if (ixgbe_cache_ring_fcoe(adapter))
4708 return; 4810 return;
4709
4710#endif /* IXGBE_FCOE */ 4811#endif /* IXGBE_FCOE */
4711#ifdef CONFIG_IXGBE_DCB
4712 if (ixgbe_cache_ring_dcb(adapter))
4713 return;
4714 4812
4715#endif
4716 if (ixgbe_cache_ring_fdir(adapter)) 4813 if (ixgbe_cache_ring_fdir(adapter))
4717 return; 4814 return;
4718 4815
@@ -5174,10 +5271,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5174 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 5271 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5175 adapter->dcb_cfg.rx_pba_cfg = pba_equal; 5272 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
5176 adapter->dcb_cfg.pfc_mode_enable = false; 5273 adapter->dcb_cfg.pfc_mode_enable = false;
5177 adapter->dcb_cfg.round_robin_enable = false;
5178 adapter->dcb_set_bitmap = 0x00; 5274 adapter->dcb_set_bitmap = 0x00;
5275 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5179 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 5276 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
5180 adapter->ring_feature[RING_F_DCB].indices); 5277 MAX_TRAFFIC_CLASS);
5181 5278
5182#endif 5279#endif
5183 5280
@@ -5442,8 +5539,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5442 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5539 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5443 5540
5444 /* MTU < 68 is an error and causes problems on some kernels */ 5541 /* MTU < 68 is an error and causes problems on some kernels */
5445 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 5542 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
5446 return -EINVAL; 5543 hw->mac.type != ixgbe_mac_X540) {
5544 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
5545 return -EINVAL;
5546 } else {
5547 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5548 return -EINVAL;
5549 }
5447 5550
5448 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5551 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5449 /* must set new MTU before calling down or up */ 5552 /* must set new MTU before calling down or up */
@@ -5611,6 +5714,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5611 } 5714 }
5612 5715
5613 ixgbe_clear_interrupt_scheme(adapter); 5716 ixgbe_clear_interrupt_scheme(adapter);
5717#ifdef CONFIG_DCB
5718 kfree(adapter->ixgbe_ieee_pfc);
5719 kfree(adapter->ixgbe_ieee_ets);
5720#endif
5614 5721
5615#ifdef CONFIG_PM 5722#ifdef CONFIG_PM
5616 retval = pci_save_state(pdev); 5723 retval = pci_save_state(pdev);
@@ -6101,12 +6208,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
6101 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 6208 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6102 "10 Gbps" : 6209 "10 Gbps" :
6103 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 6210 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6104 "1 Gbps" : "unknown speed")), 6211 "1 Gbps" :
6212 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6213 "100 Mbps" :
6214 "unknown speed"))),
6105 ((flow_rx && flow_tx) ? "RX/TX" : 6215 ((flow_rx && flow_tx) ? "RX/TX" :
6106 (flow_rx ? "RX" : 6216 (flow_rx ? "RX" :
6107 (flow_tx ? "TX" : "None")))); 6217 (flow_tx ? "TX" : "None"))));
6108 6218
6109 netif_carrier_on(netdev); 6219 netif_carrier_on(netdev);
6220 ixgbe_check_vf_rate_limit(adapter);
6110 } else { 6221 } else {
6111 /* Force detection of hung controller */ 6222 /* Force detection of hung controller */
6112 for (i = 0; i < adapter->num_tx_queues; i++) { 6223 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -6636,18 +6747,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6636 6747
6637 protocol = vlan_get_protocol(skb); 6748 protocol = vlan_get_protocol(skb);
6638 6749
6639 if ((protocol == htons(ETH_P_FCOE)) || 6750 if (((protocol == htons(ETH_P_FCOE)) ||
6640 (protocol == htons(ETH_P_FIP))) { 6751 (protocol == htons(ETH_P_FIP))) &&
6641 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 6752 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
6642 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6753 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6643 txq += adapter->ring_feature[RING_F_FCOE].mask; 6754 txq += adapter->ring_feature[RING_F_FCOE].mask;
6644 return txq; 6755 return txq;
6645#ifdef CONFIG_IXGBE_DCB
6646 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6647 txq = adapter->fcoe.up;
6648 return txq;
6649#endif
6650 }
6651 } 6756 }
6652#endif 6757#endif
6653 6758
@@ -6657,15 +6762,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6657 return txq; 6762 return txq;
6658 } 6763 }
6659 6764
6660 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6661 if (skb->priority == TC_PRIO_CONTROL)
6662 txq = adapter->ring_feature[RING_F_DCB].indices-1;
6663 else
6664 txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
6665 >> 13;
6666 return txq;
6667 }
6668
6669 return skb_tx_hash(dev, skb); 6765 return skb_tx_hash(dev, skb);
6670} 6766}
6671 6767
@@ -6687,13 +6783,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6687 tx_flags |= vlan_tx_tag_get(skb); 6783 tx_flags |= vlan_tx_tag_get(skb);
6688 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6784 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6689 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; 6785 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
6690 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 6786 tx_flags |= tx_ring->dcb_tc << 13;
6691 } 6787 }
6692 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6788 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6693 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6789 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6694 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && 6790 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
6695 skb->priority != TC_PRIO_CONTROL) { 6791 skb->priority != TC_PRIO_CONTROL) {
6696 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 6792 tx_flags |= tx_ring->dcb_tc << 13;
6697 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6793 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6698 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6794 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6699 } 6795 }
@@ -6702,20 +6798,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6702 /* for FCoE with DCB, we force the priority to what 6798 /* for FCoE with DCB, we force the priority to what
6703 * was specified by the switch */ 6799 * was specified by the switch */
6704 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6800 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6705 (protocol == htons(ETH_P_FCOE) || 6801 (protocol == htons(ETH_P_FCOE)))
6706 protocol == htons(ETH_P_FIP))) { 6802 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6707#ifdef CONFIG_IXGBE_DCB
6708 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6709 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
6710 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6711 tx_flags |= ((adapter->fcoe.up << 13)
6712 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6713 }
6714#endif
6715 /* flag for FCoE offloads */
6716 if (protocol == htons(ETH_P_FCOE))
6717 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6718 }
6719#endif 6803#endif
6720 6804
6721 /* four things can cause us to need a context descriptor */ 6805 /* four things can cause us to need a context descriptor */
@@ -6988,11 +7072,15 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6988 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, 7072 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6989 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 7073 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
6990 .ndo_get_stats64 = ixgbe_get_stats64, 7074 .ndo_get_stats64 = ixgbe_get_stats64,
7075#ifdef CONFIG_IXGBE_DCB
7076 .ndo_setup_tc = ixgbe_setup_tc,
7077#endif
6991#ifdef CONFIG_NET_POLL_CONTROLLER 7078#ifdef CONFIG_NET_POLL_CONTROLLER
6992 .ndo_poll_controller = ixgbe_netpoll, 7079 .ndo_poll_controller = ixgbe_netpoll,
6993#endif 7080#endif
6994#ifdef IXGBE_FCOE 7081#ifdef IXGBE_FCOE
6995 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 7082 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
7083 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
6996 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 7084 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
6997 .ndo_fcoe_enable = ixgbe_fcoe_enable, 7085 .ndo_fcoe_enable = ixgbe_fcoe_enable,
6998 .ndo_fcoe_disable = ixgbe_fcoe_disable, 7086 .ndo_fcoe_disable = ixgbe_fcoe_disable,
@@ -7128,8 +7216,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7128 else 7216 else
7129 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); 7217 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
7130 7218
7219#if defined(CONFIG_DCB)
7131 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); 7220 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
7132#ifdef IXGBE_FCOE 7221#elif defined(IXGBE_FCOE)
7133 indices += min_t(unsigned int, num_possible_cpus(), 7222 indices += min_t(unsigned int, num_possible_cpus(),
7134 IXGBE_MAX_FCOE_INDICES); 7223 IXGBE_MAX_FCOE_INDICES);
7135#endif 7224#endif
@@ -7285,8 +7374,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7285 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 7374 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7286 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | 7375 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
7287 IXGBE_FLAG_DCB_ENABLED); 7376 IXGBE_FLAG_DCB_ENABLED);
7288 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
7289 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
7290 7377
7291#ifdef CONFIG_IXGBE_DCB 7378#ifdef CONFIG_IXGBE_DCB
7292 netdev->dcbnl_ops = &dcbnl_ops; 7379 netdev->dcbnl_ops = &dcbnl_ops;
@@ -7706,16 +7793,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7706 7793
7707#endif /* CONFIG_IXGBE_DCA */ 7794#endif /* CONFIG_IXGBE_DCA */
7708 7795
7709/**
7710 * ixgbe_get_hw_dev return device
7711 * used by hardware layer to print debugging information
7712 **/
7713struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
7714{
7715 struct ixgbe_adapter *adapter = hw->back;
7716 return adapter->netdev;
7717}
7718
7719module_exit(ixgbe_exit_module); 7796module_exit(ixgbe_exit_module);
7720 7797
7721/* ixgbe_main.c */ 7798/* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index ea82c5a1cd3e..1ff0eefcfd0a 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -154,9 +154,6 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
154 udelay(mbx->usec_delay); 154 udelay(mbx->usec_delay);
155 } 155 }
156 156
157 /* if we failed, all future posted messages fail until reset */
158 if (!countdown)
159 mbx->timeout = 0;
160out: 157out:
161 return countdown ? 0 : IXGBE_ERR_MBX; 158 return countdown ? 0 : IXGBE_ERR_MBX;
162} 159}
@@ -183,9 +180,6 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
183 udelay(mbx->usec_delay); 180 udelay(mbx->usec_delay);
184 } 181 }
185 182
186 /* if we failed, all future posted messages fail until reset */
187 if (!countdown)
188 mbx->timeout = 0;
189out: 183out:
190 return countdown ? 0 : IXGBE_ERR_MBX; 184 return countdown ? 0 : IXGBE_ERR_MBX;
191} 185}
@@ -437,6 +431,7 @@ out_no_read:
437 return ret_val; 431 return ret_val;
438} 432}
439 433
434#ifdef CONFIG_PCI_IOV
440/** 435/**
441 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox 436 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
442 * @hw: pointer to the HW structure 437 * @hw: pointer to the HW structure
@@ -447,24 +442,22 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
447{ 442{
448 struct ixgbe_mbx_info *mbx = &hw->mbx; 443 struct ixgbe_mbx_info *mbx = &hw->mbx;
449 444
450 switch (hw->mac.type) { 445 if (hw->mac.type != ixgbe_mac_82599EB &&
451 case ixgbe_mac_82599EB: 446 hw->mac.type != ixgbe_mac_X540)
452 case ixgbe_mac_X540: 447 return;
453 mbx->timeout = 0;
454 mbx->usec_delay = 0;
455 448
456 mbx->size = IXGBE_VFMAILBOX_SIZE; 449 mbx->timeout = 0;
450 mbx->usec_delay = 0;
457 451
458 mbx->stats.msgs_tx = 0; 452 mbx->stats.msgs_tx = 0;
459 mbx->stats.msgs_rx = 0; 453 mbx->stats.msgs_rx = 0;
460 mbx->stats.reqs = 0; 454 mbx->stats.reqs = 0;
461 mbx->stats.acks = 0; 455 mbx->stats.acks = 0;
462 mbx->stats.rsts = 0; 456 mbx->stats.rsts = 0;
463 break; 457
464 default: 458 mbx->size = IXGBE_VFMAILBOX_SIZE;
465 break;
466 }
467} 459}
460#endif /* CONFIG_PCI_IOV */
468 461
469struct ixgbe_mbx_operations mbx_ops_generic = { 462struct ixgbe_mbx_operations mbx_ops_generic = {
470 .read = ixgbe_read_mbx_pf, 463 .read = ixgbe_read_mbx_pf,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 3df9b1590218..fe6ea81dc7f8 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); 86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); 87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
89#ifdef CONFIG_PCI_IOV
89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 90void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
91#endif /* CONFIG_PCI_IOV */
90 92
91extern struct ixgbe_mbx_operations mbx_ops_generic; 93extern struct ixgbe_mbx_operations mbx_ops_generic;
92 94
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8f7123e8fc0a..f72f705f6183 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
57{ 57{
58 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 58 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
59 u32 phy_addr; 59 u32 phy_addr;
60 u16 ext_ability = 0;
60 61
61 if (hw->phy.type == ixgbe_phy_unknown) { 62 if (hw->phy.type == ixgbe_phy_unknown) {
62 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 63 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -65,12 +66,29 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
65 ixgbe_get_phy_id(hw); 66 ixgbe_get_phy_id(hw);
66 hw->phy.type = 67 hw->phy.type =
67 ixgbe_get_phy_type_from_id(hw->phy.id); 68 ixgbe_get_phy_type_from_id(hw->phy.id);
69
70 if (hw->phy.type == ixgbe_phy_unknown) {
71 hw->phy.ops.read_reg(hw,
72 MDIO_PMA_EXTABLE,
73 MDIO_MMD_PMAPMD,
74 &ext_ability);
75 if (ext_ability &
76 (MDIO_PMA_EXTABLE_10GBT |
77 MDIO_PMA_EXTABLE_1000BT))
78 hw->phy.type =
79 ixgbe_phy_cu_unknown;
80 else
81 hw->phy.type =
82 ixgbe_phy_generic;
83 }
84
68 status = 0; 85 status = 0;
69 break; 86 break;
70 } 87 }
71 } 88 }
72 /* clear value if nothing found */ 89 /* clear value if nothing found */
73 hw->phy.mdio.prtad = 0; 90 if (status != 0)
91 hw->phy.mdio.prtad = 0;
74 } else { 92 } else {
75 status = 0; 93 status = 0;
76 } 94 }
@@ -138,17 +156,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
138 **/ 156 **/
139s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 157s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
140{ 158{
159 u32 i;
160 u16 ctrl = 0;
161 s32 status = 0;
162
163 if (hw->phy.type == ixgbe_phy_unknown)
164 status = ixgbe_identify_phy_generic(hw);
165
166 if (status != 0 || hw->phy.type == ixgbe_phy_none)
167 goto out;
168
141 /* Don't reset PHY if it's shut down due to overtemp. */ 169 /* Don't reset PHY if it's shut down due to overtemp. */
142 if (!hw->phy.reset_if_overtemp && 170 if (!hw->phy.reset_if_overtemp &&
143 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 171 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
144 return 0; 172 goto out;
145 173
146 /* 174 /*
147 * Perform soft PHY reset to the PHY_XS. 175 * Perform soft PHY reset to the PHY_XS.
148 * This will cause a soft reset to the PHY 176 * This will cause a soft reset to the PHY
149 */ 177 */
150 return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 178 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
151 MDIO_CTRL1_RESET); 179 MDIO_MMD_PHYXS,
180 MDIO_CTRL1_RESET);
181
182 /*
183 * Poll for reset bit to self-clear indicating reset is complete.
184 * Some PHYs could take up to 3 seconds to complete and need about
185 * 1.7 usec delay after the reset is complete.
186 */
187 for (i = 0; i < 30; i++) {
188 msleep(100);
189 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
190 MDIO_MMD_PHYXS, &ctrl);
191 if (!(ctrl & MDIO_CTRL1_RESET)) {
192 udelay(2);
193 break;
194 }
195 }
196
197 if (ctrl & MDIO_CTRL1_RESET) {
198 status = IXGBE_ERR_RESET_FAILED;
199 hw_dbg(hw, "PHY reset polling failed to complete.\n");
200 }
201
202out:
203 return status;
152} 204}
153 205
154/** 206/**
@@ -171,7 +223,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
171 else 223 else
172 gssr = IXGBE_GSSR_PHY0_SM; 224 gssr = IXGBE_GSSR_PHY0_SM;
173 225
174 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 226 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
175 status = IXGBE_ERR_SWFW_SYNC; 227 status = IXGBE_ERR_SWFW_SYNC;
176 228
177 if (status == 0) { 229 if (status == 0) {
@@ -243,7 +295,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
243 } 295 }
244 } 296 }
245 297
246 ixgbe_release_swfw_sync(hw, gssr); 298 hw->mac.ops.release_swfw_sync(hw, gssr);
247 } 299 }
248 300
249 return status; 301 return status;
@@ -269,7 +321,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
269 else 321 else
270 gssr = IXGBE_GSSR_PHY0_SM; 322 gssr = IXGBE_GSSR_PHY0_SM;
271 323
272 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 324 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
273 status = IXGBE_ERR_SWFW_SYNC; 325 status = IXGBE_ERR_SWFW_SYNC;
274 326
275 if (status == 0) { 327 if (status == 0) {
@@ -336,7 +388,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
336 } 388 }
337 } 389 }
338 390
339 ixgbe_release_swfw_sync(hw, gssr); 391 hw->mac.ops.release_swfw_sync(hw, gssr);
340 } 392 }
341 393
342 return status; 394 return status;
@@ -350,49 +402,89 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
350 **/ 402 **/
351s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) 403s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
352{ 404{
353 s32 status = IXGBE_NOT_IMPLEMENTED; 405 s32 status = 0;
354 u32 time_out; 406 u32 time_out;
355 u32 max_time_out = 10; 407 u32 max_time_out = 10;
356 u16 autoneg_reg; 408 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
409 bool autoneg = false;
410 ixgbe_link_speed speed;
357 411
358 /* 412 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
359 * Set advertisement settings in PHY based on autoneg_advertised 413
360 * settings. If autoneg_advertised = 0, then advertise default values 414 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
361 * tnx devices cannot be "forced" to a autoneg 10G and fail. But can 415 /* Set or unset auto-negotiation 10G advertisement */
362 * for a 1G. 416 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
363 */ 417 MDIO_MMD_AN,
364 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); 418 &autoneg_reg);
365 419
366 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
367 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; 420 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
368 else 421 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
369 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; 422 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
423
424 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
425 MDIO_MMD_AN,
426 autoneg_reg);
427 }
428
429 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
430 /* Set or unset auto-negotiation 1G advertisement */
431 hw->phy.ops.read_reg(hw,
432 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
433 MDIO_MMD_AN,
434 &autoneg_reg);
435
436 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
437 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
438 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
439
440 hw->phy.ops.write_reg(hw,
441 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
442 MDIO_MMD_AN,
443 autoneg_reg);
444 }
445
446 if (speed & IXGBE_LINK_SPEED_100_FULL) {
447 /* Set or unset auto-negotiation 100M advertisement */
448 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
449 MDIO_MMD_AN,
450 &autoneg_reg);
370 451
371 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); 452 autoneg_reg &= ~ADVERTISE_100FULL;
453 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
454 autoneg_reg |= ADVERTISE_100FULL;
455
456 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
457 MDIO_MMD_AN,
458 autoneg_reg);
459 }
372 460
373 /* Restart PHY autonegotiation and wait for completion */ 461 /* Restart PHY autonegotiation and wait for completion */
374 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); 462 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
463 MDIO_MMD_AN, &autoneg_reg);
375 464
376 autoneg_reg |= MDIO_AN_CTRL1_RESTART; 465 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
377 466
378 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); 467 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
468 MDIO_MMD_AN, autoneg_reg);
379 469
380 /* Wait for autonegotiation to finish */ 470 /* Wait for autonegotiation to finish */
381 for (time_out = 0; time_out < max_time_out; time_out++) { 471 for (time_out = 0; time_out < max_time_out; time_out++) {
382 udelay(10); 472 udelay(10);
383 /* Restart PHY autonegotiation and wait for completion */ 473 /* Restart PHY autonegotiation and wait for completion */
384 status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, 474 status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
385 &autoneg_reg); 475 MDIO_MMD_AN,
476 &autoneg_reg);
386 477
387 autoneg_reg &= MDIO_AN_STAT1_COMPLETE; 478 autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
388 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { 479 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
389 status = 0;
390 break; 480 break;
391 } 481 }
392 } 482 }
393 483
394 if (time_out == max_time_out) 484 if (time_out == max_time_out) {
395 status = IXGBE_ERR_LINK_SETUP; 485 status = IXGBE_ERR_LINK_SETUP;
486 hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
487 }
396 488
397 return status; 489 return status;
398} 490}
@@ -421,6 +513,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
421 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 513 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
422 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 514 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
423 515
516 if (speed & IXGBE_LINK_SPEED_100_FULL)
517 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
518
424 /* Setup link based on the new speed settings */ 519 /* Setup link based on the new speed settings */
425 hw->phy.ops.setup_link(hw); 520 hw->phy.ops.setup_link(hw);
426 521
@@ -461,6 +556,180 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
461} 556}
462 557
463/** 558/**
559 * ixgbe_check_phy_link_tnx - Determine link and speed status
560 * @hw: pointer to hardware structure
561 *
562 * Reads the VS1 register to determine if link is up and the current speed for
563 * the PHY.
564 **/
565s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
566 bool *link_up)
567{
568 s32 status = 0;
569 u32 time_out;
570 u32 max_time_out = 10;
571 u16 phy_link = 0;
572 u16 phy_speed = 0;
573 u16 phy_data = 0;
574
575 /* Initialize speed and link to default case */
576 *link_up = false;
577 *speed = IXGBE_LINK_SPEED_10GB_FULL;
578
579 /*
580 * Check current speed and link status of the PHY register.
581 * This is a vendor specific register and may have to
582 * be changed for other copper PHYs.
583 */
584 for (time_out = 0; time_out < max_time_out; time_out++) {
585 udelay(10);
586 status = hw->phy.ops.read_reg(hw,
587 MDIO_STAT1,
588 MDIO_MMD_VEND1,
589 &phy_data);
590 phy_link = phy_data &
591 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
592 phy_speed = phy_data &
593 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
594 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
595 *link_up = true;
596 if (phy_speed ==
597 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
598 *speed = IXGBE_LINK_SPEED_1GB_FULL;
599 break;
600 }
601 }
602
603 return status;
604}
605
606/**
607 * ixgbe_setup_phy_link_tnx - Set and restart autoneg
608 * @hw: pointer to hardware structure
609 *
610 * Restart autonegotiation and PHY and waits for completion.
611 **/
612s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
613{
614 s32 status = 0;
615 u32 time_out;
616 u32 max_time_out = 10;
617 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
618 bool autoneg = false;
619 ixgbe_link_speed speed;
620
621 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
622
623 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
624 /* Set or unset auto-negotiation 10G advertisement */
625 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
626 MDIO_MMD_AN,
627 &autoneg_reg);
628
629 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
630 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
631 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
632
633 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
634 MDIO_MMD_AN,
635 autoneg_reg);
636 }
637
638 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
639 /* Set or unset auto-negotiation 1G advertisement */
640 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
641 MDIO_MMD_AN,
642 &autoneg_reg);
643
644 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
645 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
646 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
647
648 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
649 MDIO_MMD_AN,
650 autoneg_reg);
651 }
652
653 if (speed & IXGBE_LINK_SPEED_100_FULL) {
654 /* Set or unset auto-negotiation 100M advertisement */
655 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
656 MDIO_MMD_AN,
657 &autoneg_reg);
658
659 autoneg_reg &= ~ADVERTISE_100FULL;
660 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
661 autoneg_reg |= ADVERTISE_100FULL;
662
663 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
664 MDIO_MMD_AN,
665 autoneg_reg);
666 }
667
668 /* Restart PHY autonegotiation and wait for completion */
669 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
670 MDIO_MMD_AN, &autoneg_reg);
671
672 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
673
674 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
675 MDIO_MMD_AN, autoneg_reg);
676
677 /* Wait for autonegotiation to finish */
678 for (time_out = 0; time_out < max_time_out; time_out++) {
679 udelay(10);
680 /* Restart PHY autonegotiation and wait for completion */
681 status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
682 MDIO_MMD_AN,
683 &autoneg_reg);
684
685 autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
686 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
687 break;
688 }
689
690 if (time_out == max_time_out) {
691 status = IXGBE_ERR_LINK_SETUP;
692 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
693 }
694
695 return status;
696}
697
698/**
699 * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
700 * @hw: pointer to hardware structure
701 * @firmware_version: pointer to the PHY Firmware Version
702 **/
703s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
704 u16 *firmware_version)
705{
706 s32 status = 0;
707
708 status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
709 MDIO_MMD_VEND1,
710 firmware_version);
711
712 return status;
713}
714
715/**
716 * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
717 * @hw: pointer to hardware structure
718 * @firmware_version: pointer to the PHY Firmware Version
719 **/
720s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
721 u16 *firmware_version)
722{
723 s32 status = 0;
724
725 status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
726 MDIO_MMD_VEND1,
727 firmware_version);
728
729 return status;
730}
731
732/**
464 * ixgbe_reset_phy_nl - Performs a PHY reset 733 * ixgbe_reset_phy_nl - Performs a PHY reset
465 * @hw: pointer to hardware structure 734 * @hw: pointer to hardware structure
466 **/ 735 **/
@@ -556,11 +825,10 @@ out:
556} 825}
557 826
558/** 827/**
559 * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns 828 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
560 * the PHY type.
561 * @hw: pointer to hardware structure 829 * @hw: pointer to hardware structure
562 * 830 *
563 * Searches for and indentifies the SFP module. Assings appropriate PHY type. 831 * Searches for and identifies the SFP module and assigns appropriate PHY type.
564 **/ 832 **/
565s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 833s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
566{ 834{
@@ -581,41 +849,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
581 goto out; 849 goto out;
582 } 850 }
583 851
584 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, 852 status = hw->phy.ops.read_i2c_eeprom(hw,
853 IXGBE_SFF_IDENTIFIER,
585 &identifier); 854 &identifier);
586 855
587 if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) { 856 if (status == IXGBE_ERR_SWFW_SYNC ||
588 status = IXGBE_ERR_SFP_NOT_PRESENT; 857 status == IXGBE_ERR_I2C ||
589 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 858 status == IXGBE_ERR_SFP_NOT_PRESENT)
590 if (hw->phy.type != ixgbe_phy_nl) { 859 goto err_read_i2c_eeprom;
591 hw->phy.id = 0;
592 hw->phy.type = ixgbe_phy_unknown;
593 }
594 goto out;
595 }
596 860
597 if (identifier == IXGBE_SFF_IDENTIFIER_SFP) { 861 /* LAN ID is needed for sfp_type determination */
598 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, 862 hw->mac.ops.set_lan_id(hw);
599 &comp_codes_1g); 863
600 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, 864 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
601 &comp_codes_10g); 865 hw->phy.type = ixgbe_phy_sfp_unsupported;
602 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, 866 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
603 &cable_tech); 867 } else {
604 868 status = hw->phy.ops.read_i2c_eeprom(hw,
605 /* ID Module 869 IXGBE_SFF_1GBE_COMP_CODES,
606 * ========= 870 &comp_codes_1g);
607 * 0 SFP_DA_CU 871
608 * 1 SFP_SR 872 if (status == IXGBE_ERR_SWFW_SYNC ||
609 * 2 SFP_LR 873 status == IXGBE_ERR_I2C ||
610 * 3 SFP_DA_CORE0 - 82599-specific 874 status == IXGBE_ERR_SFP_NOT_PRESENT)
611 * 4 SFP_DA_CORE1 - 82599-specific 875 goto err_read_i2c_eeprom;
612 * 5 SFP_SR/LR_CORE0 - 82599-specific 876
613 * 6 SFP_SR/LR_CORE1 - 82599-specific 877 status = hw->phy.ops.read_i2c_eeprom(hw,
614 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific 878 IXGBE_SFF_10GBE_COMP_CODES,
615 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 879 &comp_codes_10g);
616 * 9 SFP_1g_cu_CORE0 - 82599-specific 880
617 * 10 SFP_1g_cu_CORE1 - 82599-specific 881 if (status == IXGBE_ERR_SWFW_SYNC ||
618 */ 882 status == IXGBE_ERR_I2C ||
883 status == IXGBE_ERR_SFP_NOT_PRESENT)
884 goto err_read_i2c_eeprom;
885 status = hw->phy.ops.read_i2c_eeprom(hw,
886 IXGBE_SFF_CABLE_TECHNOLOGY,
887 &cable_tech);
888
889 if (status == IXGBE_ERR_SWFW_SYNC ||
890 status == IXGBE_ERR_I2C ||
891 status == IXGBE_ERR_SFP_NOT_PRESENT)
892 goto err_read_i2c_eeprom;
893
894 /* ID Module
895 * =========
896 * 0 SFP_DA_CU
897 * 1 SFP_SR
898 * 2 SFP_LR
899 * 3 SFP_DA_CORE0 - 82599-specific
900 * 4 SFP_DA_CORE1 - 82599-specific
901 * 5 SFP_SR/LR_CORE0 - 82599-specific
902 * 6 SFP_SR/LR_CORE1 - 82599-specific
903 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
904 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
905 * 9 SFP_1g_cu_CORE0 - 82599-specific
906 * 10 SFP_1g_cu_CORE1 - 82599-specific
907 */
619 if (hw->mac.type == ixgbe_mac_82598EB) { 908 if (hw->mac.type == ixgbe_mac_82598EB) {
620 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 909 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
621 hw->phy.sfp_type = ixgbe_sfp_type_da_cu; 910 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
@@ -647,31 +936,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
647 ixgbe_sfp_type_da_act_lmt_core1; 936 ixgbe_sfp_type_da_act_lmt_core1;
648 } else { 937 } else {
649 hw->phy.sfp_type = 938 hw->phy.sfp_type =
650 ixgbe_sfp_type_unknown; 939 ixgbe_sfp_type_unknown;
651 } 940 }
652 } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 941 } else if (comp_codes_10g &
942 (IXGBE_SFF_10GBASESR_CAPABLE |
943 IXGBE_SFF_10GBASELR_CAPABLE)) {
653 if (hw->bus.lan_id == 0) 944 if (hw->bus.lan_id == 0)
654 hw->phy.sfp_type = 945 hw->phy.sfp_type =
655 ixgbe_sfp_type_srlr_core0; 946 ixgbe_sfp_type_srlr_core0;
656 else 947 else
657 hw->phy.sfp_type = 948 hw->phy.sfp_type =
658 ixgbe_sfp_type_srlr_core1; 949 ixgbe_sfp_type_srlr_core1;
659 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 950 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
660 if (hw->bus.lan_id == 0)
661 hw->phy.sfp_type =
662 ixgbe_sfp_type_srlr_core0;
663 else
664 hw->phy.sfp_type =
665 ixgbe_sfp_type_srlr_core1;
666 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
667 if (hw->bus.lan_id == 0) 951 if (hw->bus.lan_id == 0)
668 hw->phy.sfp_type = 952 hw->phy.sfp_type =
669 ixgbe_sfp_type_1g_cu_core0; 953 ixgbe_sfp_type_1g_cu_core0;
670 else 954 else
671 hw->phy.sfp_type = 955 hw->phy.sfp_type =
672 ixgbe_sfp_type_1g_cu_core1; 956 ixgbe_sfp_type_1g_cu_core1;
673 else 957 } else {
674 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 958 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
959 }
675 } 960 }
676 961
677 if (hw->phy.sfp_type != stored_sfp_type) 962 if (hw->phy.sfp_type != stored_sfp_type)
@@ -688,16 +973,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
688 /* Determine PHY vendor */ 973 /* Determine PHY vendor */
689 if (hw->phy.type != ixgbe_phy_nl) { 974 if (hw->phy.type != ixgbe_phy_nl) {
690 hw->phy.id = identifier; 975 hw->phy.id = identifier;
691 hw->phy.ops.read_i2c_eeprom(hw, 976 status = hw->phy.ops.read_i2c_eeprom(hw,
692 IXGBE_SFF_VENDOR_OUI_BYTE0, 977 IXGBE_SFF_VENDOR_OUI_BYTE0,
693 &oui_bytes[0]); 978 &oui_bytes[0]);
694 hw->phy.ops.read_i2c_eeprom(hw, 979
980 if (status == IXGBE_ERR_SWFW_SYNC ||
981 status == IXGBE_ERR_I2C ||
982 status == IXGBE_ERR_SFP_NOT_PRESENT)
983 goto err_read_i2c_eeprom;
984
985 status = hw->phy.ops.read_i2c_eeprom(hw,
695 IXGBE_SFF_VENDOR_OUI_BYTE1, 986 IXGBE_SFF_VENDOR_OUI_BYTE1,
696 &oui_bytes[1]); 987 &oui_bytes[1]);
697 hw->phy.ops.read_i2c_eeprom(hw, 988
989 if (status == IXGBE_ERR_SWFW_SYNC ||
990 status == IXGBE_ERR_I2C ||
991 status == IXGBE_ERR_SFP_NOT_PRESENT)
992 goto err_read_i2c_eeprom;
993
994 status = hw->phy.ops.read_i2c_eeprom(hw,
698 IXGBE_SFF_VENDOR_OUI_BYTE2, 995 IXGBE_SFF_VENDOR_OUI_BYTE2,
699 &oui_bytes[2]); 996 &oui_bytes[2]);
700 997
998 if (status == IXGBE_ERR_SWFW_SYNC ||
999 status == IXGBE_ERR_I2C ||
1000 status == IXGBE_ERR_SFP_NOT_PRESENT)
1001 goto err_read_i2c_eeprom;
1002
701 vendor_oui = 1003 vendor_oui =
702 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | 1004 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
703 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | 1005 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
@@ -707,7 +1009,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
707 case IXGBE_SFF_VENDOR_OUI_TYCO: 1009 case IXGBE_SFF_VENDOR_OUI_TYCO:
708 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
709 hw->phy.type = 1011 hw->phy.type =
710 ixgbe_phy_sfp_passive_tyco; 1012 ixgbe_phy_sfp_passive_tyco;
711 break; 1013 break;
712 case IXGBE_SFF_VENDOR_OUI_FTL: 1014 case IXGBE_SFF_VENDOR_OUI_FTL:
713 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 1015 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -724,7 +1026,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
724 default: 1026 default:
725 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 1027 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
726 hw->phy.type = 1028 hw->phy.type =
727 ixgbe_phy_sfp_passive_unknown; 1029 ixgbe_phy_sfp_passive_unknown;
728 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 1030 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
729 hw->phy.type = 1031 hw->phy.type =
730 ixgbe_phy_sfp_active_unknown; 1032 ixgbe_phy_sfp_active_unknown;
@@ -734,7 +1036,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
734 } 1036 }
735 } 1037 }
736 1038
737 /* All passive DA cables are supported */ 1039 /* Allow any DA cable vendor */
738 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | 1040 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
739 IXGBE_SFF_DA_ACTIVE_CABLE)) { 1041 IXGBE_SFF_DA_ACTIVE_CABLE)) {
740 status = 0; 1042 status = 0;
@@ -756,7 +1058,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
756 goto out; 1058 goto out;
757 } 1059 }
758 1060
759 /* This is guaranteed to be 82599, no need to check for NULL */
760 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 1061 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
761 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && 1062 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
762 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || 1063 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
@@ -776,15 +1077,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
776 1077
777out: 1078out:
778 return status; 1079 return status;
1080
1081err_read_i2c_eeprom:
1082 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1083 if (hw->phy.type != ixgbe_phy_nl) {
1084 hw->phy.id = 0;
1085 hw->phy.type = ixgbe_phy_unknown;
1086 }
1087 return IXGBE_ERR_SFP_NOT_PRESENT;
779} 1088}
780 1089
781/** 1090/**
782 * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see 1091 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
783 * if it supports a given SFP+ module type, if so it returns the offsets to the
784 * phy init sequence block.
785 * @hw: pointer to hardware structure 1092 * @hw: pointer to hardware structure
786 * @list_offset: offset to the SFP ID list 1093 * @list_offset: offset to the SFP ID list
787 * @data_offset: offset to the SFP data block 1094 * @data_offset: offset to the SFP data block
1095 *
1096 * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
1097 * so it returns the offsets to the phy init sequence block.
788 **/ 1098 **/
789s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 1099s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
790 u16 *list_offset, 1100 u16 *list_offset,
@@ -899,11 +1209,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
899 u8 dev_addr, u8 *data) 1209 u8 dev_addr, u8 *data)
900{ 1210{
901 s32 status = 0; 1211 s32 status = 0;
902 u32 max_retry = 1; 1212 u32 max_retry = 10;
903 u32 retry = 0; 1213 u32 retry = 0;
1214 u16 swfw_mask = 0;
904 bool nack = 1; 1215 bool nack = 1;
905 1216
1217 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1218 swfw_mask = IXGBE_GSSR_PHY1_SM;
1219 else
1220 swfw_mask = IXGBE_GSSR_PHY0_SM;
1221
906 do { 1222 do {
1223 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
1224 status = IXGBE_ERR_SWFW_SYNC;
1225 goto read_byte_out;
1226 }
1227
907 ixgbe_i2c_start(hw); 1228 ixgbe_i2c_start(hw);
908 1229
909 /* Device Address and write indication */ 1230 /* Device Address and write indication */
@@ -946,6 +1267,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
946 break; 1267 break;
947 1268
948fail: 1269fail:
1270 ixgbe_release_swfw_sync(hw, swfw_mask);
1271 msleep(100);
949 ixgbe_i2c_bus_clear(hw); 1272 ixgbe_i2c_bus_clear(hw);
950 retry++; 1273 retry++;
951 if (retry < max_retry) 1274 if (retry < max_retry)
@@ -955,6 +1278,9 @@ fail:
955 1278
956 } while (retry < max_retry); 1279 } while (retry < max_retry);
957 1280
1281 ixgbe_release_swfw_sync(hw, swfw_mask);
1282
1283read_byte_out:
958 return status; 1284 return status;
959} 1285}
960 1286
@@ -973,6 +1299,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
973 s32 status = 0; 1299 s32 status = 0;
974 u32 max_retry = 1; 1300 u32 max_retry = 1;
975 u32 retry = 0; 1301 u32 retry = 0;
1302 u16 swfw_mask = 0;
1303
1304 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1305 swfw_mask = IXGBE_GSSR_PHY1_SM;
1306 else
1307 swfw_mask = IXGBE_GSSR_PHY0_SM;
1308
1309 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
1310 status = IXGBE_ERR_SWFW_SYNC;
1311 goto write_byte_out;
1312 }
976 1313
977 do { 1314 do {
978 ixgbe_i2c_start(hw); 1315 ixgbe_i2c_start(hw);
@@ -1013,6 +1350,9 @@ fail:
1013 hw_dbg(hw, "I2C byte write error.\n"); 1350 hw_dbg(hw, "I2C byte write error.\n");
1014 } while (retry < max_retry); 1351 } while (retry < max_retry);
1015 1352
1353 ixgbe_release_swfw_sync(hw, swfw_mask);
1354
1355write_byte_out:
1016 return status; 1356 return status;
1017} 1357}
1018 1358
@@ -1331,6 +1671,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1331 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1671 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1332 u32 i; 1672 u32 i;
1333 1673
1674 ixgbe_i2c_start(hw);
1675
1334 ixgbe_set_i2c_data(hw, &i2cctl, 1); 1676 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1335 1677
1336 for (i = 0; i < 9; i++) { 1678 for (i = 0; i < 9; i++) {
@@ -1345,91 +1687,13 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1345 udelay(IXGBE_I2C_T_LOW); 1687 udelay(IXGBE_I2C_T_LOW);
1346 } 1688 }
1347 1689
1690 ixgbe_i2c_start(hw);
1691
1348 /* Put the i2c bus back to default state */ 1692 /* Put the i2c bus back to default state */
1349 ixgbe_i2c_stop(hw); 1693 ixgbe_i2c_stop(hw);
1350} 1694}
1351 1695
1352/** 1696/**
1353 * ixgbe_check_phy_link_tnx - Determine link and speed status
1354 * @hw: pointer to hardware structure
1355 *
1356 * Reads the VS1 register to determine if link is up and the current speed for
1357 * the PHY.
1358 **/
1359s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
1360 bool *link_up)
1361{
1362 s32 status = 0;
1363 u32 time_out;
1364 u32 max_time_out = 10;
1365 u16 phy_link = 0;
1366 u16 phy_speed = 0;
1367 u16 phy_data = 0;
1368
1369 /* Initialize speed and link to default case */
1370 *link_up = false;
1371 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1372
1373 /*
1374 * Check current speed and link status of the PHY register.
1375 * This is a vendor specific register and may have to
1376 * be changed for other copper PHYs.
1377 */
1378 for (time_out = 0; time_out < max_time_out; time_out++) {
1379 udelay(10);
1380 status = hw->phy.ops.read_reg(hw,
1381 IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
1382 MDIO_MMD_VEND1,
1383 &phy_data);
1384 phy_link = phy_data &
1385 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
1386 phy_speed = phy_data &
1387 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
1388 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
1389 *link_up = true;
1390 if (phy_speed ==
1391 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
1392 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1393 break;
1394 }
1395 }
1396
1397 return status;
1398}
1399
1400/**
1401 * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
1402 * @hw: pointer to hardware structure
1403 * @firmware_version: pointer to the PHY Firmware Version
1404 **/
1405s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
1406 u16 *firmware_version)
1407{
1408 s32 status = 0;
1409
1410 status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1,
1411 firmware_version);
1412
1413 return status;
1414}
1415
1416/**
1417 * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
1418 * @hw: pointer to hardware structure
1419 * @firmware_version: pointer to the PHY Firmware Version
1420**/
1421s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
1422 u16 *firmware_version)
1423{
1424 s32 status = 0;
1425
1426 status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
1427 firmware_version);
1428
1429 return status;
1430}
1431
1432/**
1433 * ixgbe_tn_check_overtemp - Checks if an overtemp occured. 1697 * ixgbe_tn_check_overtemp - Checks if an overtemp occured.
1434 * @hw: pointer to hardware structure 1698 * @hw: pointer to hardware structure
1435 * 1699 *
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index e2c6b7eac641..197bdd13106a 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -58,6 +58,10 @@
58#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 58#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
59#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 59#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
60 60
61/* Flow control defines */
62#define IXGBE_TAF_SYM_PAUSE 0x400
63#define IXGBE_TAF_ASM_PAUSE 0x800
64
61/* Bit-shift macros */ 65/* Bit-shift macros */
62#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 66#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
63#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 67#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
@@ -104,6 +108,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
104s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 108s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
105 ixgbe_link_speed *speed, 109 ixgbe_link_speed *speed,
106 bool *link_up); 110 bool *link_up);
111s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
107s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 112s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
108 u16 *firmware_version); 113 u16 *firmware_version);
109s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, 114s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 187b3a16ec1f..6e50d8328942 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -110,6 +110,33 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
111} 111}
112 112
113void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
114{
115 struct ixgbe_hw *hw = &adapter->hw;
116 int new_mtu = msgbuf[1];
117 u32 max_frs;
118 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
119
120 /* Only X540 supports jumbo frames in IOV mode */
121 if (adapter->hw.mac.type != ixgbe_mac_X540)
122 return;
123
124 /* MTU < 68 is an error and causes problems on some kernels */
125 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
126 e_err(drv, "VF mtu %d out of range\n", new_mtu);
127 return;
128 }
129
130 max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
131 IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
132 if (max_frs < new_mtu) {
133 max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
134 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
135 }
136
137 e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
138}
139
113static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 140static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
114{ 141{
115 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 142 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
@@ -302,7 +329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
302 hash_list, vf); 329 hash_list, vf);
303 break; 330 break;
304 case IXGBE_VF_SET_LPE: 331 case IXGBE_VF_SET_LPE:
305 WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE); 332 ixgbe_set_vf_lpe(adapter, msgbuf);
306 break; 333 break;
307 case IXGBE_VF_SET_VLAN: 334 case IXGBE_VF_SET_VLAN:
308 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) 335 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
@@ -451,9 +478,90 @@ out:
451 return err; 478 return err;
452} 479}
453 480
481static int ixgbe_link_mbps(int internal_link_speed)
482{
483 switch (internal_link_speed) {
484 case IXGBE_LINK_SPEED_100_FULL:
485 return 100;
486 case IXGBE_LINK_SPEED_1GB_FULL:
487 return 1000;
488 case IXGBE_LINK_SPEED_10GB_FULL:
489 return 10000;
490 default:
491 return 0;
492 }
493}
494
495static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
496 int link_speed)
497{
498 int rf_dec, rf_int;
499 u32 bcnrc_val;
500
501 if (tx_rate != 0) {
502 /* Calculate the rate factor values to set */
503 rf_int = link_speed / tx_rate;
504 rf_dec = (link_speed - (rf_int * tx_rate));
505 rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
506
507 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
508 bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
509 IXGBE_RTTBCNRC_RF_INT_MASK);
510 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
511 } else {
512 bcnrc_val = 0;
513 }
514
515 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
516 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
517}
518
519void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
520{
521 int actual_link_speed, i;
522 bool reset_rate = false;
523
524 /* VF Tx rate limit was not set */
525 if (adapter->vf_rate_link_speed == 0)
526 return;
527
528 actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
529 if (actual_link_speed != adapter->vf_rate_link_speed) {
530 reset_rate = true;
531 adapter->vf_rate_link_speed = 0;
532 dev_info(&adapter->pdev->dev,
533 "Link speed has been changed. VF Transmit rate "
534 "is disabled\n");
535 }
536
537 for (i = 0; i < adapter->num_vfs; i++) {
538 if (reset_rate)
539 adapter->vfinfo[i].tx_rate = 0;
540
541 ixgbe_set_vf_rate_limit(&adapter->hw, i,
542 adapter->vfinfo[i].tx_rate,
543 actual_link_speed);
544 }
545}
546
454int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 547int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
455{ 548{
456 return -EOPNOTSUPP; 549 struct ixgbe_adapter *adapter = netdev_priv(netdev);
550 struct ixgbe_hw *hw = &adapter->hw;
551 int actual_link_speed;
552
553 actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
554 if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
555 (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
556 ((tx_rate != 0) && (tx_rate <= 10)))
557 /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
558 return -EINVAL;
559
560 adapter->vf_rate_link_speed = actual_link_speed;
561 adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
562 ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
563
564 return 0;
457} 565}
458 566
459int ixgbe_ndo_get_vf_config(struct net_device *netdev, 567int ixgbe_ndo_get_vf_config(struct net_device *netdev,
@@ -464,7 +572,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
464 return -EINVAL; 572 return -EINVAL;
465 ivi->vf = vf; 573 ivi->vf = vf;
466 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); 574 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
467 ivi->tx_rate = 0; 575 ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
468 ivi->vlan = adapter->vfinfo[vf].pf_vlan; 576 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
469 ivi->qos = adapter->vfinfo[vf].pf_qos; 577 ivi->qos = adapter->vfinfo[vf].pf_qos;
470 return 0; 578 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 49dc14debef7..34175564bb78 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -40,6 +40,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
40int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 40int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
41int ixgbe_ndo_get_vf_config(struct net_device *netdev, 41int ixgbe_ndo_get_vf_config(struct net_device *netdev,
42 int vf, struct ifla_vf_info *ivi); 42 int vf, struct ifla_vf_info *ivi);
43void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
43 44
44#endif /* _IXGBE_SRIOV_H_ */ 45#endif /* _IXGBE_SRIOV_H_ */
45 46
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fd3358f54139..25c1fb7eda06 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -91,7 +91,7 @@
91 91
92/* General Receive Control */ 92/* General Receive Control */
93#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ 93#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
94#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */ 94#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
95 95
96#define IXGBE_VPDDIAG0 0x10204 96#define IXGBE_VPDDIAG0 0x10204
97#define IXGBE_VPDDIAG1 0x10208 97#define IXGBE_VPDDIAG1 0x10208
@@ -342,7 +342,7 @@
342/* Wake Up Control */ 342/* Wake Up Control */
343#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ 343#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
344#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ 344#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
345#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/ 345#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
346 346
347/* Wake Up Filter Control */ 347/* Wake Up Filter Control */
348#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 348#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -533,6 +533,12 @@
533#define IXGBE_RTTDTECC 0x04990 533#define IXGBE_RTTDTECC 0x04990
534#define IXGBE_RTTDTECC_NO_BCN 0x00000100 534#define IXGBE_RTTDTECC_NO_BCN 0x00000100
535#define IXGBE_RTTBCNRC 0x04984 535#define IXGBE_RTTBCNRC 0x04984
536#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
537#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
538#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
539#define IXGBE_RTTBCNRC_RF_INT_MASK \
540 (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
541
536 542
537/* FCoE registers */ 543/* FCoE registers */
538#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ 544#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
@@ -659,6 +665,8 @@
659#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ 665#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
660#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ 666#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
661#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ 667#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
668#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
669#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
662#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ 670#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
663#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ 671#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
664#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ 672#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
@@ -669,6 +677,11 @@
669#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ 677#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
670#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ 678#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
671#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ 679#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
680#define IXGBE_PCRC8ECL 0x0E810
681#define IXGBE_PCRC8ECH 0x0E811
682#define IXGBE_PCRC8ECH_MASK 0x1F
683#define IXGBE_LDPCECL 0x0E820
684#define IXGBE_LDPCECH 0x0E821
672 685
673/* Management */ 686/* Management */
674#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ 687#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1002,6 +1015,13 @@
1002#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ 1015#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
1003#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ 1016#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
1004 1017
1018/* MII clause 22/28 definitions */
1019#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
1020#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
1021#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
1022#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
1023#define IXGBE_MII_AUTONEG_REG 0x0
1024
1005#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 1025#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
1006#define IXGBE_MAX_PHY_ADDR 32 1026#define IXGBE_MAX_PHY_ADDR 32
1007 1027
@@ -1614,6 +1634,8 @@
1614#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ 1634#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
1615 1635
1616/* PCI Bus Info */ 1636/* PCI Bus Info */
1637#define IXGBE_PCI_DEVICE_STATUS 0xAA
1638#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
1617#define IXGBE_PCI_LINK_STATUS 0xB2 1639#define IXGBE_PCI_LINK_STATUS 0xB2
1618#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1640#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
1619#define IXGBE_PCI_LINK_WIDTH 0x3F0 1641#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -1680,6 +1702,8 @@
1680#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ 1702#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
1681#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 1703#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
1682#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ 1704#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
1705#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
1706#define IXGBE_RXDCTL_RLPML_EN 0x00008000
1683 1707
1684#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ 1708#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
1685#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ 1709#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -2240,6 +2264,7 @@ enum ixgbe_mac_type {
2240 2264
2241enum ixgbe_phy_type { 2265enum ixgbe_phy_type {
2242 ixgbe_phy_unknown = 0, 2266 ixgbe_phy_unknown = 0,
2267 ixgbe_phy_none,
2243 ixgbe_phy_tn, 2268 ixgbe_phy_tn,
2244 ixgbe_phy_aq, 2269 ixgbe_phy_aq,
2245 ixgbe_phy_cu_unknown, 2270 ixgbe_phy_cu_unknown,
@@ -2328,32 +2353,31 @@ enum ixgbe_bus_type {
2328/* PCI bus speeds */ 2353/* PCI bus speeds */
2329enum ixgbe_bus_speed { 2354enum ixgbe_bus_speed {
2330 ixgbe_bus_speed_unknown = 0, 2355 ixgbe_bus_speed_unknown = 0,
2331 ixgbe_bus_speed_33, 2356 ixgbe_bus_speed_33 = 33,
2332 ixgbe_bus_speed_66, 2357 ixgbe_bus_speed_66 = 66,
2333 ixgbe_bus_speed_100, 2358 ixgbe_bus_speed_100 = 100,
2334 ixgbe_bus_speed_120, 2359 ixgbe_bus_speed_120 = 120,
2335 ixgbe_bus_speed_133, 2360 ixgbe_bus_speed_133 = 133,
2336 ixgbe_bus_speed_2500, 2361 ixgbe_bus_speed_2500 = 2500,
2337 ixgbe_bus_speed_5000, 2362 ixgbe_bus_speed_5000 = 5000,
2338 ixgbe_bus_speed_reserved 2363 ixgbe_bus_speed_reserved
2339}; 2364};
2340 2365
2341/* PCI bus widths */ 2366/* PCI bus widths */
2342enum ixgbe_bus_width { 2367enum ixgbe_bus_width {
2343 ixgbe_bus_width_unknown = 0, 2368 ixgbe_bus_width_unknown = 0,
2344 ixgbe_bus_width_pcie_x1, 2369 ixgbe_bus_width_pcie_x1 = 1,
2345 ixgbe_bus_width_pcie_x2, 2370 ixgbe_bus_width_pcie_x2 = 2,
2346 ixgbe_bus_width_pcie_x4 = 4, 2371 ixgbe_bus_width_pcie_x4 = 4,
2347 ixgbe_bus_width_pcie_x8 = 8, 2372 ixgbe_bus_width_pcie_x8 = 8,
2348 ixgbe_bus_width_32, 2373 ixgbe_bus_width_32 = 32,
2349 ixgbe_bus_width_64, 2374 ixgbe_bus_width_64 = 64,
2350 ixgbe_bus_width_reserved 2375 ixgbe_bus_width_reserved
2351}; 2376};
2352 2377
2353struct ixgbe_addr_filter_info { 2378struct ixgbe_addr_filter_info {
2354 u32 num_mc_addrs; 2379 u32 num_mc_addrs;
2355 u32 rar_used_count; 2380 u32 rar_used_count;
2356 u32 mc_addr_in_rar_count;
2357 u32 mta_in_use; 2381 u32 mta_in_use;
2358 u32 overflow_promisc; 2382 u32 overflow_promisc;
2359 bool uc_set_promisc; 2383 bool uc_set_promisc;
@@ -2491,6 +2515,8 @@ struct ixgbe_mac_operations {
2491 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); 2515 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
2492 s32 (*setup_sfp)(struct ixgbe_hw *); 2516 s32 (*setup_sfp)(struct ixgbe_hw *);
2493 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2517 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2518 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
2519 void (*release_swfw_sync)(struct ixgbe_hw *, u16);
2494 2520
2495 /* Link */ 2521 /* Link */
2496 void (*disable_tx_laser)(struct ixgbe_hw *); 2522 void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2513,7 +2539,6 @@ struct ixgbe_mac_operations {
2513 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2539 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2514 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2540 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2515 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2541 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2516 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2517 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); 2542 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2518 s32 (*enable_mc)(struct ixgbe_hw *); 2543 s32 (*enable_mc)(struct ixgbe_hw *);
2519 s32 (*disable_mc)(struct ixgbe_hw *); 2544 s32 (*disable_mc)(struct ixgbe_hw *);
@@ -2554,6 +2579,7 @@ struct ixgbe_eeprom_info {
2554 u16 address_bits; 2579 u16 address_bits;
2555}; 2580};
2556 2581
2582#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
2557struct ixgbe_mac_info { 2583struct ixgbe_mac_info {
2558 struct ixgbe_mac_operations ops; 2584 struct ixgbe_mac_operations ops;
2559 enum ixgbe_mac_type type; 2585 enum ixgbe_mac_type type;
@@ -2564,6 +2590,8 @@ struct ixgbe_mac_info {
2564 u16 wwnn_prefix; 2590 u16 wwnn_prefix;
2565 /* prefix for World Wide Port Name (WWPN) */ 2591 /* prefix for World Wide Port Name (WWPN) */
2566 u16 wwpn_prefix; 2592 u16 wwpn_prefix;
2593#define IXGBE_MAX_MTA 128
2594 u32 mta_shadow[IXGBE_MAX_MTA];
2567 s32 mc_filter_type; 2595 s32 mc_filter_type;
2568 u32 mcft_size; 2596 u32 mcft_size;
2569 u32 vft_size; 2597 u32 vft_size;
@@ -2576,6 +2604,7 @@ struct ixgbe_mac_info {
2576 u32 orig_autoc2; 2604 u32 orig_autoc2;
2577 bool orig_link_settings_stored; 2605 bool orig_link_settings_stored;
2578 bool autotry_restart; 2606 bool autotry_restart;
2607 u8 flags;
2579}; 2608};
2580 2609
2581struct ixgbe_phy_info { 2610struct ixgbe_phy_info {
@@ -2682,7 +2711,9 @@ struct ixgbe_info {
2682#define IXGBE_ERR_EEPROM_VERSION -24 2711#define IXGBE_ERR_EEPROM_VERSION -24
2683#define IXGBE_ERR_NO_SPACE -25 2712#define IXGBE_ERR_NO_SPACE -25
2684#define IXGBE_ERR_OVERTEMP -26 2713#define IXGBE_ERR_OVERTEMP -26
2685#define IXGBE_ERR_RAR_INDEX -27 2714#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
2715#define IXGBE_ERR_FC_NOT_SUPPORTED -28
2716#define IXGBE_ERR_FLOW_CONTROL -29
2686#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 2717#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2687#define IXGBE_ERR_PBA_SECTION -31 2718#define IXGBE_ERR_PBA_SECTION -31
2688#define IXGBE_ERR_INVALID_ARGUMENT -32 2719#define IXGBE_ERR_INVALID_ARGUMENT -32
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index f2518b01067d..f47e93fe32be 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,7 +31,6 @@
31 31
32#include "ixgbe.h" 32#include "ixgbe.h"
33#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
34//#include "ixgbe_mbx.h"
35 34
36#define IXGBE_X540_MAX_TX_QUEUES 128 35#define IXGBE_X540_MAX_TX_QUEUES 128
37#define IXGBE_X540_MAX_RX_QUEUES 128 36#define IXGBE_X540_MAX_RX_QUEUES 128
@@ -110,12 +109,9 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
110 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 109 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
111 * access and verify no pending requests before reset 110 * access and verify no pending requests before reset
112 */ 111 */
113 status = ixgbe_disable_pcie_master(hw); 112 ixgbe_disable_pcie_master(hw);
114 if (status != 0) {
115 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
116 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
117 }
118 113
114mac_reset_top:
119 /* 115 /*
120 * Issue global reset to the MAC. Needs to be SW reset if link is up. 116 * Issue global reset to the MAC. Needs to be SW reset if link is up.
121 * If link reset is used when link is up, it might reset the PHY when 117 * If link reset is used when link is up, it might reset the PHY when
@@ -148,6 +144,19 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
148 hw_dbg(hw, "Reset polling failed to complete.\n"); 144 hw_dbg(hw, "Reset polling failed to complete.\n");
149 } 145 }
150 146
147 /*
148 * Double resets are required for recovery from certain error
149 * conditions. Between resets, it is necessary to stall to allow time
150 * for any pending HW events to complete. We use 1usec since that is
151 * what is needed for ixgbe_disable_pcie_master(). The second reset
152 * then clears out any effects of those events.
153 */
154 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
155 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
156 udelay(1);
157 goto mac_reset_top;
158 }
159
151 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ 160 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
152 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 161 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
153 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 162 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
@@ -191,7 +200,7 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
191 * clear the multicast table. Also reset num_rar_entries to 128, 200 * clear the multicast table. Also reset num_rar_entries to 128,
192 * since we modify this value when programming the SAN MAC address. 201 * since we modify this value when programming the SAN MAC address.
193 */ 202 */
194 hw->mac.num_rar_entries = 128; 203 hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
195 hw->mac.ops.init_rx_addrs(hw); 204 hw->mac.ops.init_rx_addrs(hw);
196 205
197 /* Store the permanent mac address */ 206 /* Store the permanent mac address */
@@ -242,8 +251,11 @@ static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
242} 251}
243 252
244/** 253/**
245 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params 254 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
246 * @hw: pointer to hardware structure 255 * @hw: pointer to hardware structure
256 *
257 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
258 * ixgbe_hw struct in order to set up EEPROM access.
247 **/ 259 **/
248static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) 260static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
249{ 261{
@@ -262,7 +274,7 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
262 IXGBE_EEPROM_WORD_SIZE_SHIFT); 274 IXGBE_EEPROM_WORD_SIZE_SHIFT);
263 275
264 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", 276 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
265 eeprom->type, eeprom->word_size); 277 eeprom->type, eeprom->word_size);
266 } 278 }
267 279
268 return 0; 280 return 0;
@@ -278,7 +290,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
278{ 290{
279 s32 status; 291 s32 status;
280 292
281 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) 293 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
282 status = ixgbe_read_eerd_generic(hw, offset, data); 294 status = ixgbe_read_eerd_generic(hw, offset, data);
283 else 295 else
284 status = IXGBE_ERR_SWFW_SYNC; 296 status = IXGBE_ERR_SWFW_SYNC;
@@ -311,7 +323,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
311 (data << IXGBE_EEPROM_RW_REG_DATA) | 323 (data << IXGBE_EEPROM_RW_REG_DATA) |
312 IXGBE_EEPROM_RW_REG_START; 324 IXGBE_EEPROM_RW_REG_START;
313 325
314 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) { 326 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
315 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 327 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
316 if (status != 0) { 328 if (status != 0) {
317 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 329 hw_dbg(hw, "Eeprom write EEWR timed out\n");
@@ -676,7 +688,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
676 .set_vmdq = &ixgbe_set_vmdq_generic, 688 .set_vmdq = &ixgbe_set_vmdq_generic,
677 .clear_vmdq = &ixgbe_clear_vmdq_generic, 689 .clear_vmdq = &ixgbe_clear_vmdq_generic,
678 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 690 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
679 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
680 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 691 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
681 .enable_mc = &ixgbe_enable_mc_generic, 692 .enable_mc = &ixgbe_enable_mc_generic,
682 .disable_mc = &ixgbe_disable_mc_generic, 693 .disable_mc = &ixgbe_disable_mc_generic,
@@ -687,6 +698,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
687 .setup_sfp = NULL, 698 .setup_sfp = NULL,
688 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 699 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
689 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 700 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
701 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
702 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
690}; 703};
691 704
692static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 705static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
@@ -702,7 +715,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
702 .identify = &ixgbe_identify_phy_generic, 715 .identify = &ixgbe_identify_phy_generic,
703 .identify_sfp = &ixgbe_identify_sfp_module_generic, 716 .identify_sfp = &ixgbe_identify_sfp_module_generic,
704 .init = NULL, 717 .init = NULL,
705 .reset = &ixgbe_reset_phy_generic, 718 .reset = NULL,
706 .read_reg = &ixgbe_read_phy_reg_generic, 719 .read_reg = &ixgbe_read_phy_reg_generic,
707 .write_reg = &ixgbe_write_phy_reg_generic, 720 .write_reg = &ixgbe_write_phy_reg_generic,
708 .setup_link = &ixgbe_setup_phy_link_generic, 721 .setup_link = &ixgbe_setup_phy_link_generic,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index de643eb2ada6..78abb6f1a866 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -65,6 +65,8 @@ typedef u32 ixgbe_link_speed;
65#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ 65#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
66#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 66#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
67#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ 67#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
68#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
69#define IXGBE_RXDCTL_RLPML_EN 0x00008000
68 70
69/* DCA Control */ 71/* DCA Control */
70#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 72#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index fa29b3c8c464..0563ab29264e 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -172,7 +172,7 @@ static char *ixgbevf_reg_names[] = {
172 "IXGBE_VFSTATUS", 172 "IXGBE_VFSTATUS",
173 "IXGBE_VFLINKS", 173 "IXGBE_VFLINKS",
174 "IXGBE_VFRXMEMWRAP", 174 "IXGBE_VFRXMEMWRAP",
175 "IXGBE_VFRTIMER", 175 "IXGBE_VFFRTIMER",
176 "IXGBE_VTEICR", 176 "IXGBE_VTEICR",
177 "IXGBE_VTEICS", 177 "IXGBE_VTEICS",
178 "IXGBE_VTEIMS", 178 "IXGBE_VTEIMS",
@@ -240,7 +240,7 @@ static void ixgbevf_get_regs(struct net_device *netdev,
240 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); 240 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
241 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 241 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
242 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); 242 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
243 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER); 243 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
244 244
245 /* Interrupt */ 245 /* Interrupt */
246 /* don't read EICR because it can clear interrupt causes, instead 246 /* don't read EICR because it can clear interrupt causes, instead
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index a63efcb2cf1b..b703f60be3b7 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -207,7 +207,6 @@ struct ixgbevf_adapter {
207 u64 hw_tso_ctxt; 207 u64 hw_tso_ctxt;
208 u64 hw_tso6_ctxt; 208 u64 hw_tso6_ctxt;
209 u32 tx_timeout_count; 209 u32 tx_timeout_count;
210 bool detect_tx_hung;
211 210
212 /* RX */ 211 /* RX */
213 struct ixgbevf_ring *rx_ring; /* One per active queue */ 212 struct ixgbevf_ring *rx_ring; /* One per active queue */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 464e6c9d3fc2..054ab05b7c6a 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -49,9 +49,9 @@
49 49
50char ixgbevf_driver_name[] = "ixgbevf"; 50char ixgbevf_driver_name[] = "ixgbevf";
51static const char ixgbevf_driver_string[] = 51static const char ixgbevf_driver_string[] =
52 "Intel(R) 82599 Virtual Function"; 52 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
53 53
54#define DRV_VERSION "1.0.19-k0" 54#define DRV_VERSION "2.0.0-k2"
55const char ixgbevf_driver_version[] = DRV_VERSION; 55const char ixgbevf_driver_version[] = DRV_VERSION;
56static char ixgbevf_copyright[] = 56static char ixgbevf_copyright[] =
57 "Copyright (c) 2009 - 2010 Intel Corporation."; 57 "Copyright (c) 2009 - 2010 Intel Corporation.";
@@ -107,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
107} 107}
108 108
109/* 109/*
110 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors 110 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
111 * @adapter: pointer to adapter struct 111 * @adapter: pointer to adapter struct
112 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 112 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
113 * @queue: queue to map the corresponding interrupt to 113 * @queue: queue to map the corresponding interrupt to
@@ -162,42 +162,6 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
162 /* tx_buffer_info must be completely set up in the transmit path */ 162 /* tx_buffer_info must be completely set up in the transmit path */
163} 163}
164 164
165static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
166 struct ixgbevf_ring *tx_ring,
167 unsigned int eop)
168{
169 struct ixgbe_hw *hw = &adapter->hw;
170 u32 head, tail;
171
172 /* Detect a transmit hang in hardware, this serializes the
173 * check with the clearing of time_stamp and movement of eop */
174 head = readl(hw->hw_addr + tx_ring->head);
175 tail = readl(hw->hw_addr + tx_ring->tail);
176 adapter->detect_tx_hung = false;
177 if ((head != tail) &&
178 tx_ring->tx_buffer_info[eop].time_stamp &&
179 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
180 /* detected Tx unit hang */
181 union ixgbe_adv_tx_desc *tx_desc;
182 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
183 printk(KERN_ERR "Detected Tx Unit Hang\n"
184 " Tx Queue <%d>\n"
185 " TDH, TDT <%x>, <%x>\n"
186 " next_to_use <%x>\n"
187 " next_to_clean <%x>\n"
188 "tx_buffer_info[next_to_clean]\n"
189 " time_stamp <%lx>\n"
190 " jiffies <%lx>\n",
191 tx_ring->queue_index,
192 head, tail,
193 tx_ring->next_to_use, eop,
194 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
195 return true;
196 }
197
198 return false;
199}
200
201#define IXGBE_MAX_TXD_PWR 14 165#define IXGBE_MAX_TXD_PWR 14
202#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 166#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
203 167
@@ -293,16 +257,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
293#endif 257#endif
294 } 258 }
295 259
296 if (adapter->detect_tx_hung) {
297 if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
298 /* schedule immediate reset if we believe we hung */
299 printk(KERN_INFO
300 "tx hang %d detected, resetting adapter\n",
301 adapter->tx_timeout_count + 1);
302 ixgbevf_tx_timeout(adapter->netdev);
303 }
304 }
305
306 /* re-arm the interrupt */ 260 /* re-arm the interrupt */
307 if ((count >= tx_ring->work_limit) && 261 if ((count >= tx_ring->work_limit) &&
308 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) { 262 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
@@ -334,7 +288,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
334 struct ixgbevf_adapter *adapter = q_vector->adapter; 288 struct ixgbevf_adapter *adapter = q_vector->adapter;
335 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 289 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
336 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 290 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
337 int ret;
338 291
339 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 292 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
340 if (adapter->vlgrp && is_vlan) 293 if (adapter->vlgrp && is_vlan)
@@ -345,9 +298,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
345 napi_gro_receive(&q_vector->napi, skb); 298 napi_gro_receive(&q_vector->napi, skb);
346 } else { 299 } else {
347 if (adapter->vlgrp && is_vlan) 300 if (adapter->vlgrp && is_vlan)
348 ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag); 301 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
349 else 302 else
350 ret = netif_rx(skb); 303 netif_rx(skb);
351 } 304 }
352} 305}
353 306
@@ -1017,7 +970,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
1017} 970}
1018 971
1019/** 972/**
1020 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) 973 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
1021 * @irq: unused 974 * @irq: unused
1022 * @data: pointer to our q_vector struct for this interrupt vector 975 * @data: pointer to our q_vector struct for this interrupt vector
1023 **/ 976 **/
@@ -1665,6 +1618,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1665 j = adapter->rx_ring[i].reg_idx; 1618 j = adapter->rx_ring[i].reg_idx;
1666 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1619 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1667 rxdctl |= IXGBE_RXDCTL_ENABLE; 1620 rxdctl |= IXGBE_RXDCTL_ENABLE;
1621 if (hw->mac.type == ixgbe_mac_X540_vf) {
1622 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1623 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1624 IXGBE_RXDCTL_RLPML_EN);
1625 }
1668 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1626 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1669 ixgbevf_rx_desc_queue_enable(adapter, i); 1627 ixgbevf_rx_desc_queue_enable(adapter, i);
1670 } 1628 }
@@ -1967,7 +1925,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1967} 1925}
1968 1926
1969/* 1927/*
1970 * ixgbe_set_num_queues: Allocate queues for device, feature dependant 1928 * ixgbevf_set_num_queues: Allocate queues for device, feature dependant
1971 * @adapter: board private structure to initialize 1929 * @adapter: board private structure to initialize
1972 * 1930 *
1973 * This is the top level queue allocation routine. The order here is very 1931 * This is the top level queue allocation routine. The order here is very
@@ -2216,7 +2174,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2216 2174
2217 hw->vendor_id = pdev->vendor; 2175 hw->vendor_id = pdev->vendor;
2218 hw->device_id = pdev->device; 2176 hw->device_id = pdev->device;
2219 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 2177 hw->revision_id = pdev->revision;
2220 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2178 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2221 hw->subsystem_device_id = pdev->subsystem_device; 2179 hw->subsystem_device_id = pdev->subsystem_device;
2222 2180
@@ -2410,9 +2368,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2410 10 : 1); 2368 10 : 1);
2411 netif_carrier_on(netdev); 2369 netif_carrier_on(netdev);
2412 netif_tx_wake_all_queues(netdev); 2370 netif_tx_wake_all_queues(netdev);
2413 } else {
2414 /* Force detection of hung controller */
2415 adapter->detect_tx_hung = true;
2416 } 2371 }
2417 } else { 2372 } else {
2418 adapter->link_up = false; 2373 adapter->link_up = false;
@@ -2427,9 +2382,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2427 ixgbevf_update_stats(adapter); 2382 ixgbevf_update_stats(adapter);
2428 2383
2429pf_has_reset: 2384pf_has_reset:
2430 /* Force detection of hung controller every watchdog period */
2431 adapter->detect_tx_hung = true;
2432
2433 /* Reset the timer */ 2385 /* Reset the timer */
2434 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2386 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2435 mod_timer(&adapter->watchdog_timer, 2387 mod_timer(&adapter->watchdog_timer,
@@ -3217,10 +3169,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3217static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3169static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3218{ 3170{
3219 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3171 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3172 struct ixgbe_hw *hw = &adapter->hw;
3220 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3173 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3174 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3175 u32 msg[2];
3176
3177 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3178 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3221 3179
3222 /* MTU < 68 is an error and causes problems on some kernels */ 3180 /* MTU < 68 is an error and causes problems on some kernels */
3223 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) 3181 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3224 return -EINVAL; 3182 return -EINVAL;
3225 3183
3226 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3184 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3228,6 +3186,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3228 /* must set new MTU before calling down or up */ 3186 /* must set new MTU before calling down or up */
3229 netdev->mtu = new_mtu; 3187 netdev->mtu = new_mtu;
3230 3188
3189 msg[0] = IXGBE_VF_SET_LPE;
3190 msg[1] = max_frame;
3191 hw->mbx.ops.write_posted(hw, msg, 2);
3192
3231 if (netif_running(netdev)) 3193 if (netif_running(netdev))
3232 ixgbevf_reinit_locked(adapter); 3194 ixgbevf_reinit_locked(adapter);
3233 3195
@@ -3272,8 +3234,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
3272 3234
3273static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3235static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3274{ 3236{
3275 struct ixgbevf_adapter *adapter;
3276 adapter = netdev_priv(dev);
3277 dev->netdev_ops = &ixgbe_netdev_ops; 3237 dev->netdev_ops = &ixgbe_netdev_ops;
3278 ixgbevf_set_ethtool_ops(dev); 3238 ixgbevf_set_ethtool_ops(dev);
3279 dev->watchdog_timeo = 5 * HZ; 3239 dev->watchdog_timeo = 5 * HZ;
@@ -3519,9 +3479,9 @@ static struct pci_driver ixgbevf_driver = {
3519}; 3479};
3520 3480
3521/** 3481/**
3522 * ixgbe_init_module - Driver Registration Routine 3482 * ixgbevf_init_module - Driver Registration Routine
3523 * 3483 *
3524 * ixgbe_init_module is the first routine called when the driver is 3484 * ixgbevf_init_module is the first routine called when the driver is
3525 * loaded. All it does is register with the PCI subsystem. 3485 * loaded. All it does is register with the PCI subsystem.
3526 **/ 3486 **/
3527static int __init ixgbevf_init_module(void) 3487static int __init ixgbevf_init_module(void)
@@ -3539,9 +3499,9 @@ static int __init ixgbevf_init_module(void)
3539module_init(ixgbevf_init_module); 3499module_init(ixgbevf_init_module);
3540 3500
3541/** 3501/**
3542 * ixgbe_exit_module - Driver Exit Cleanup Routine 3502 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3543 * 3503 *
3544 * ixgbe_exit_module is called just before the driver is removed 3504 * ixgbevf_exit_module is called just before the driver is removed
3545 * from memory. 3505 * from memory.
3546 **/ 3506 **/
3547static void __exit ixgbevf_exit_module(void) 3507static void __exit ixgbevf_exit_module(void)
@@ -3551,7 +3511,7 @@ static void __exit ixgbevf_exit_module(void)
3551 3511
3552#ifdef DEBUG 3512#ifdef DEBUG
3553/** 3513/**
3554 * ixgbe_get_hw_dev_name - return device name string 3514 * ixgbevf_get_hw_dev_name - return device name string
3555 * used by hardware layer to print debugging information 3515 * used by hardware layer to print debugging information
3556 **/ 3516 **/
3557char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3517char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index fb80ca1bcc93..189200eeca26 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -31,7 +31,7 @@
31#define IXGBE_VFCTRL 0x00000 31#define IXGBE_VFCTRL 0x00000
32#define IXGBE_VFSTATUS 0x00008 32#define IXGBE_VFSTATUS 0x00008
33#define IXGBE_VFLINKS 0x00010 33#define IXGBE_VFLINKS 0x00010
34#define IXGBE_VFRTIMER 0x00048 34#define IXGBE_VFFRTIMER 0x00048
35#define IXGBE_VFRXMEMWRAP 0x03190 35#define IXGBE_VFRXMEMWRAP 0x03190
36#define IXGBE_VTEICR 0x00100 36#define IXGBE_VTEICR 0x00100
37#define IXGBE_VTEICS 0x00104 37#define IXGBE_VTEICS 0x00104
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index e97ebef3cf47..f690474f4409 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -161,6 +161,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
161} 161}
162 162
163static inline void 163static inline void
164jme_mac_rxclk_off(struct jme_adapter *jme)
165{
166 jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
167 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
168}
169
170static inline void
171jme_mac_rxclk_on(struct jme_adapter *jme)
172{
173 jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
174 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
175}
176
177static inline void
178jme_mac_txclk_off(struct jme_adapter *jme)
179{
180 jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
181 jwrite32f(jme, JME_GHC, jme->reg_ghc);
182}
183
184static inline void
185jme_mac_txclk_on(struct jme_adapter *jme)
186{
187 u32 speed = jme->reg_ghc & GHC_SPEED;
188 if (speed == GHC_SPEED_1000M)
189 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
190 else
191 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
192 jwrite32f(jme, JME_GHC, jme->reg_ghc);
193}
194
195static inline void
196jme_reset_ghc_speed(struct jme_adapter *jme)
197{
198 jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
199 jwrite32f(jme, JME_GHC, jme->reg_ghc);
200}
201
202static inline void
203jme_reset_250A2_workaround(struct jme_adapter *jme)
204{
205 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
206 GPREG1_RSSPATCH);
207 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
208}
209
210static inline void
211jme_assert_ghc_reset(struct jme_adapter *jme)
212{
213 jme->reg_ghc |= GHC_SWRST;
214 jwrite32f(jme, JME_GHC, jme->reg_ghc);
215}
216
217static inline void
218jme_clear_ghc_reset(struct jme_adapter *jme)
219{
220 jme->reg_ghc &= ~GHC_SWRST;
221 jwrite32f(jme, JME_GHC, jme->reg_ghc);
222}
223
224static inline void
164jme_reset_mac_processor(struct jme_adapter *jme) 225jme_reset_mac_processor(struct jme_adapter *jme)
165{ 226{
166 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 227 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
@@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme)
168 u32 gpreg0; 229 u32 gpreg0;
169 int i; 230 int i;
170 231
171 jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); 232 jme_reset_ghc_speed(jme);
172 udelay(2); 233 jme_reset_250A2_workaround(jme);
173 jwrite32(jme, JME_GHC, jme->reg_ghc); 234
235 jme_mac_rxclk_on(jme);
236 jme_mac_txclk_on(jme);
237 udelay(1);
238 jme_assert_ghc_reset(jme);
239 udelay(1);
240 jme_mac_rxclk_off(jme);
241 jme_mac_txclk_off(jme);
242 udelay(1);
243 jme_clear_ghc_reset(jme);
244 udelay(1);
245 jme_mac_rxclk_on(jme);
246 jme_mac_txclk_on(jme);
247 udelay(1);
248 jme_mac_rxclk_off(jme);
249 jme_mac_txclk_off(jme);
174 250
175 jwrite32(jme, JME_RXDBA_LO, 0x00000000); 251 jwrite32(jme, JME_RXDBA_LO, 0x00000000);
176 jwrite32(jme, JME_RXDBA_HI, 0x00000000); 252 jwrite32(jme, JME_RXDBA_HI, 0x00000000);
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
190 else 266 else
191 gpreg0 = GPREG0_DEFAULT; 267 gpreg0 = GPREG0_DEFAULT;
192 jwrite32(jme, JME_GPREG0, gpreg0); 268 jwrite32(jme, JME_GPREG0, gpreg0);
193 jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
194}
195
196static inline void
197jme_reset_ghc_speed(struct jme_adapter *jme)
198{
199 jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
200 jwrite32(jme, JME_GHC, jme->reg_ghc);
201} 269}
202 270
203static inline void 271static inline void
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
336} 404}
337 405
338static inline void 406static inline void
339jme_set_phyfifoa(struct jme_adapter *jme) 407jme_set_phyfifo_5level(struct jme_adapter *jme)
340{ 408{
341 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); 409 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
342} 410}
343 411
344static inline void 412static inline void
345jme_set_phyfifob(struct jme_adapter *jme) 413jme_set_phyfifo_8level(struct jme_adapter *jme)
346{ 414{
347 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); 415 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
348} 416}
@@ -351,7 +419,7 @@ static int
351jme_check_link(struct net_device *netdev, int testonly) 419jme_check_link(struct net_device *netdev, int testonly)
352{ 420{
353 struct jme_adapter *jme = netdev_priv(netdev); 421 struct jme_adapter *jme = netdev_priv(netdev);
354 u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1; 422 u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
355 char linkmsg[64]; 423 char linkmsg[64];
356 int rc = 0; 424 int rc = 0;
357 425
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
414 482
415 jme->phylink = phylink; 483 jme->phylink = phylink;
416 484
417 ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX | 485 /*
418 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE | 486 * The speed/duplex setting of jme->reg_ghc already cleared
419 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY); 487 * by jme_reset_mac_processor()
488 */
420 switch (phylink & PHY_LINK_SPEED_MASK) { 489 switch (phylink & PHY_LINK_SPEED_MASK) {
421 case PHY_LINK_SPEED_10M: 490 case PHY_LINK_SPEED_10M:
422 ghc |= GHC_SPEED_10M | 491 jme->reg_ghc |= GHC_SPEED_10M;
423 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
424 strcat(linkmsg, "10 Mbps, "); 492 strcat(linkmsg, "10 Mbps, ");
425 break; 493 break;
426 case PHY_LINK_SPEED_100M: 494 case PHY_LINK_SPEED_100M:
427 ghc |= GHC_SPEED_100M | 495 jme->reg_ghc |= GHC_SPEED_100M;
428 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
429 strcat(linkmsg, "100 Mbps, "); 496 strcat(linkmsg, "100 Mbps, ");
430 break; 497 break;
431 case PHY_LINK_SPEED_1000M: 498 case PHY_LINK_SPEED_1000M:
432 ghc |= GHC_SPEED_1000M | 499 jme->reg_ghc |= GHC_SPEED_1000M;
433 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
434 strcat(linkmsg, "1000 Mbps, "); 500 strcat(linkmsg, "1000 Mbps, ");
435 break; 501 break;
436 default: 502 default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
439 505
440 if (phylink & PHY_LINK_DUPLEX) { 506 if (phylink & PHY_LINK_DUPLEX) {
441 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); 507 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
442 ghc |= GHC_DPX; 508 jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
509 jme->reg_ghc |= GHC_DPX;
443 } else { 510 } else {
444 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | 511 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
445 TXMCS_BACKOFF | 512 TXMCS_BACKOFF |
446 TXMCS_CARRIERSENSE | 513 TXMCS_CARRIERSENSE |
447 TXMCS_COLLISION); 514 TXMCS_COLLISION);
448 jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | 515 jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
449 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
450 TXTRHD_TXREN |
451 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
452 } 516 }
453 517
454 gpreg1 = GPREG1_DEFAULT; 518 jwrite32(jme, JME_GHC, jme->reg_ghc);
519
455 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 520 if (is_buggy250(jme->pdev->device, jme->chiprev)) {
521 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
522 GPREG1_RSSPATCH);
456 if (!(phylink & PHY_LINK_DUPLEX)) 523 if (!(phylink & PHY_LINK_DUPLEX))
457 gpreg1 |= GPREG1_HALFMODEPATCH; 524 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
458 switch (phylink & PHY_LINK_SPEED_MASK) { 525 switch (phylink & PHY_LINK_SPEED_MASK) {
459 case PHY_LINK_SPEED_10M: 526 case PHY_LINK_SPEED_10M:
460 jme_set_phyfifoa(jme); 527 jme_set_phyfifo_8level(jme);
461 gpreg1 |= GPREG1_RSSPATCH; 528 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
462 break; 529 break;
463 case PHY_LINK_SPEED_100M: 530 case PHY_LINK_SPEED_100M:
464 jme_set_phyfifob(jme); 531 jme_set_phyfifo_5level(jme);
465 gpreg1 |= GPREG1_RSSPATCH; 532 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
466 break; 533 break;
467 case PHY_LINK_SPEED_1000M: 534 case PHY_LINK_SPEED_1000M:
468 jme_set_phyfifoa(jme); 535 jme_set_phyfifo_8level(jme);
469 break; 536 break;
470 default: 537 default:
471 break; 538 break;
472 } 539 }
473 } 540 }
474 541 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
475 jwrite32(jme, JME_GPREG1, gpreg1);
476 jwrite32(jme, JME_GHC, ghc);
477 jme->reg_ghc = ghc;
478 542
479 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 543 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
480 "Full-Duplex, " : 544 "Full-Duplex, " :
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
613 * Enable TX Engine 677 * Enable TX Engine
614 */ 678 */
615 wmb(); 679 wmb();
616 jwrite32(jme, JME_TXCS, jme->reg_txcs | 680 jwrite32f(jme, JME_TXCS, jme->reg_txcs |
617 TXCS_SELECT_QUEUE0 | 681 TXCS_SELECT_QUEUE0 |
618 TXCS_ENABLE); 682 TXCS_ENABLE);
619 683
684 /*
685 * Start clock for TX MAC Processor
686 */
687 jme_mac_txclk_on(jme);
620} 688}
621 689
622static inline void 690static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
651 719
652 if (!i) 720 if (!i)
653 pr_err("Disable TX engine timeout\n"); 721 pr_err("Disable TX engine timeout\n");
722
723 /*
724 * Stop clock for TX MAC Processor
725 */
726 jme_mac_txclk_off(jme);
654} 727}
655 728
656static void 729static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
825 /* 898 /*
826 * Setup Unicast Filter 899 * Setup Unicast Filter
827 */ 900 */
901 jme_set_unicastaddr(jme->dev);
828 jme_set_multi(jme->dev); 902 jme_set_multi(jme->dev);
829 903
830 /* 904 /*
831 * Enable RX Engine 905 * Enable RX Engine
832 */ 906 */
833 wmb(); 907 wmb();
834 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 908 jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
835 RXCS_QUEUESEL_Q0 | 909 RXCS_QUEUESEL_Q0 |
836 RXCS_ENABLE | 910 RXCS_ENABLE |
837 RXCS_QST); 911 RXCS_QST);
912
913 /*
914 * Start clock for RX MAC Processor
915 */
916 jme_mac_rxclk_on(jme);
838} 917}
839 918
840static inline void 919static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
871 if (!i) 950 if (!i)
872 pr_err("Disable RX engine timeout\n"); 951 pr_err("Disable RX engine timeout\n");
873 952
953 /*
954 * Stop clock for RX MAC Processor
955 */
956 jme_mac_rxclk_off(jme);
957}
958
959static u16
960jme_udpsum(struct sk_buff *skb)
961{
962 u16 csum = 0xFFFFu;
963
964 if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
965 return csum;
966 if (skb->protocol != htons(ETH_P_IP))
967 return csum;
968 skb_set_network_header(skb, ETH_HLEN);
969 if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
970 (skb->len < (ETH_HLEN +
971 (ip_hdr(skb)->ihl << 2) +
972 sizeof(struct udphdr)))) {
973 skb_reset_network_header(skb);
974 return csum;
975 }
976 skb_set_transport_header(skb,
977 ETH_HLEN + (ip_hdr(skb)->ihl << 2));
978 csum = udp_hdr(skb)->check;
979 skb_reset_transport_header(skb);
980 skb_reset_network_header(skb);
981
982 return csum;
874} 983}
875 984
876static int 985static int
877jme_rxsum_ok(struct jme_adapter *jme, u16 flags) 986jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
878{ 987{
879 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) 988 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
880 return false; 989 return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
887 } 996 }
888 997
889 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 998 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
890 == RXWBFLAG_UDPON)) { 999 == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
891 if (flags & RXWBFLAG_IPV4) 1000 if (flags & RXWBFLAG_IPV4)
892 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); 1001 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
893 return false; 1002 return false;
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
935 skb_put(skb, framesize); 1044 skb_put(skb, framesize);
936 skb->protocol = eth_type_trans(skb, jme->dev); 1045 skb->protocol = eth_type_trans(skb, jme->dev);
937 1046
938 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 1047 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
939 skb->ip_summed = CHECKSUM_UNNECESSARY; 1048 skb->ip_summed = CHECKSUM_UNNECESSARY;
940 else 1049 else
941 skb_checksum_none_assert(skb); 1050 skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
1207 tasklet_disable(&jme->rxempty_task); 1316 tasklet_disable(&jme->rxempty_task);
1208 1317
1209 if (netif_carrier_ok(netdev)) { 1318 if (netif_carrier_ok(netdev)) {
1210 jme_reset_ghc_speed(jme);
1211 jme_disable_rx_engine(jme); 1319 jme_disable_rx_engine(jme);
1212 jme_disable_tx_engine(jme); 1320 jme_disable_tx_engine(jme);
1213 jme_reset_mac_processor(jme); 1321 jme_reset_mac_processor(jme);
@@ -1577,6 +1685,38 @@ jme_free_irq(struct jme_adapter *jme)
1577} 1685}
1578 1686
1579static inline void 1687static inline void
1688jme_new_phy_on(struct jme_adapter *jme)
1689{
1690 u32 reg;
1691
1692 reg = jread32(jme, JME_PHY_PWR);
1693 reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1694 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1695 jwrite32(jme, JME_PHY_PWR, reg);
1696
1697 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1698 reg &= ~PE1_GPREG0_PBG;
1699 reg |= PE1_GPREG0_ENBG;
1700 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1701}
1702
1703static inline void
1704jme_new_phy_off(struct jme_adapter *jme)
1705{
1706 u32 reg;
1707
1708 reg = jread32(jme, JME_PHY_PWR);
1709 reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1710 PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1711 jwrite32(jme, JME_PHY_PWR, reg);
1712
1713 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1714 reg &= ~PE1_GPREG0_PBG;
1715 reg |= PE1_GPREG0_PDD3COLD;
1716 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1717}
1718
1719static inline void
1580jme_phy_on(struct jme_adapter *jme) 1720jme_phy_on(struct jme_adapter *jme)
1581{ 1721{
1582 u32 bmcr; 1722 u32 bmcr;
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
1584 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1724 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1585 bmcr &= ~BMCR_PDOWN; 1725 bmcr &= ~BMCR_PDOWN;
1586 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); 1726 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1727
1728 if (new_phy_power_ctrl(jme->chip_main_rev))
1729 jme_new_phy_on(jme);
1730}
1731
1732static inline void
1733jme_phy_off(struct jme_adapter *jme)
1734{
1735 u32 bmcr;
1736
1737 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1738 bmcr |= BMCR_PDOWN;
1739 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1740
1741 if (new_phy_power_ctrl(jme->chip_main_rev))
1742 jme_new_phy_off(jme);
1587} 1743}
1588 1744
1589static int 1745static int
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
1606 1762
1607 jme_start_irq(jme); 1763 jme_start_irq(jme);
1608 1764
1609 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 1765 jme_phy_on(jme);
1610 jme_phy_on(jme); 1766 if (test_bit(JME_FLAG_SSET, &jme->flags))
1611 jme_set_settings(netdev, &jme->old_ecmd); 1767 jme_set_settings(netdev, &jme->old_ecmd);
1612 } else { 1768 else
1613 jme_reset_phy_processor(jme); 1769 jme_reset_phy_processor(jme);
1614 }
1615 1770
1616 jme_reset_link(jme); 1771 jme_reset_link(jme);
1617 1772
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
1657 } 1812 }
1658} 1813}
1659 1814
1660static inline void
1661jme_phy_off(struct jme_adapter *jme)
1662{
1663 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
1664}
1665
1666static void 1815static void
1667jme_powersave_phy(struct jme_adapter *jme) 1816jme_powersave_phy(struct jme_adapter *jme)
1668{ 1817{
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
1696 tasklet_disable(&jme->rxclean_task); 1845 tasklet_disable(&jme->rxclean_task);
1697 tasklet_disable(&jme->rxempty_task); 1846 tasklet_disable(&jme->rxempty_task);
1698 1847
1699 jme_reset_ghc_speed(jme);
1700 jme_disable_rx_engine(jme); 1848 jme_disable_rx_engine(jme);
1701 jme_disable_tx_engine(jme); 1849 jme_disable_tx_engine(jme);
1702 jme_reset_mac_processor(jme); 1850 jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1993 return NETDEV_TX_OK; 2141 return NETDEV_TX_OK;
1994} 2142}
1995 2143
2144static void
2145jme_set_unicastaddr(struct net_device *netdev)
2146{
2147 struct jme_adapter *jme = netdev_priv(netdev);
2148 u32 val;
2149
2150 val = (netdev->dev_addr[3] & 0xff) << 24 |
2151 (netdev->dev_addr[2] & 0xff) << 16 |
2152 (netdev->dev_addr[1] & 0xff) << 8 |
2153 (netdev->dev_addr[0] & 0xff);
2154 jwrite32(jme, JME_RXUMA_LO, val);
2155 val = (netdev->dev_addr[5] & 0xff) << 8 |
2156 (netdev->dev_addr[4] & 0xff);
2157 jwrite32(jme, JME_RXUMA_HI, val);
2158}
2159
1996static int 2160static int
1997jme_set_macaddr(struct net_device *netdev, void *p) 2161jme_set_macaddr(struct net_device *netdev, void *p)
1998{ 2162{
1999 struct jme_adapter *jme = netdev_priv(netdev); 2163 struct jme_adapter *jme = netdev_priv(netdev);
2000 struct sockaddr *addr = p; 2164 struct sockaddr *addr = p;
2001 u32 val;
2002 2165
2003 if (netif_running(netdev)) 2166 if (netif_running(netdev))
2004 return -EBUSY; 2167 return -EBUSY;
2005 2168
2006 spin_lock_bh(&jme->macaddr_lock); 2169 spin_lock_bh(&jme->macaddr_lock);
2007 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2170 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2008 2171 jme_set_unicastaddr(netdev);
2009 val = (addr->sa_data[3] & 0xff) << 24 |
2010 (addr->sa_data[2] & 0xff) << 16 |
2011 (addr->sa_data[1] & 0xff) << 8 |
2012 (addr->sa_data[0] & 0xff);
2013 jwrite32(jme, JME_RXUMA_LO, val);
2014 val = (addr->sa_data[5] & 0xff) << 8 |
2015 (addr->sa_data[4] & 0xff);
2016 jwrite32(jme, JME_RXUMA_HI, val);
2017 spin_unlock_bh(&jme->macaddr_lock); 2172 spin_unlock_bh(&jme->macaddr_lock);
2018 2173
2019 return 0; 2174 return 0;
@@ -2731,6 +2886,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
2731 2886
2732 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; 2887 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2733 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; 2888 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2889 jme->chip_main_rev = jme->chiprev & 0xF;
2890 jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
2734} 2891}
2735 2892
2736static const struct net_device_ops jme_netdev_ops = { 2893static const struct net_device_ops jme_netdev_ops = {
@@ -2880,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev,
2880 jme->reg_rxmcs = RXMCS_DEFAULT; 3037 jme->reg_rxmcs = RXMCS_DEFAULT;
2881 jme->reg_txpfc = 0; 3038 jme->reg_txpfc = 0;
2882 jme->reg_pmcs = PMCS_MFEN; 3039 jme->reg_pmcs = PMCS_MFEN;
3040 jme->reg_gpreg1 = GPREG1_DEFAULT;
2883 set_bit(JME_FLAG_TXCSUM, &jme->flags); 3041 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2884 set_bit(JME_FLAG_TSO, &jme->flags); 3042 set_bit(JME_FLAG_TSO, &jme->flags);
2885 3043
@@ -2936,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev,
2936 jme->mii_if.mdio_write = jme_mdio_write; 3094 jme->mii_if.mdio_write = jme_mdio_write;
2937 3095
2938 jme_clear_pm(jme); 3096 jme_clear_pm(jme);
2939 jme_set_phyfifoa(jme); 3097 jme_set_phyfifo_5level(jme);
2940 pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev); 3098 jme->pcirev = pdev->revision;
2941 if (!jme->fpgaver) 3099 if (!jme->fpgaver)
2942 jme_phy_init(jme); 3100 jme_phy_init(jme);
2943 jme_phy_off(jme); 3101 jme_phy_off(jme);
@@ -2964,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev,
2964 goto err_out_unmap; 3122 goto err_out_unmap;
2965 } 3123 }
2966 3124
2967 netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n", 3125 netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
2968 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? 3126 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
2969 "JMC250 Gigabit Ethernet" : 3127 "JMC250 Gigabit Ethernet" :
2970 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? 3128 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
2971 "JMC260 Fast Ethernet" : "Unknown", 3129 "JMC260 Fast Ethernet" : "Unknown",
2972 (jme->fpgaver != 0) ? " (FPGA)" : "", 3130 (jme->fpgaver != 0) ? " (FPGA)" : "",
2973 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, 3131 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
2974 jme->rev, netdev->dev_addr); 3132 jme->pcirev, netdev->dev_addr);
2975 3133
2976 return 0; 3134 return 0;
2977 3135
@@ -3035,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
3035 jme_polling_mode(jme); 3193 jme_polling_mode(jme);
3036 3194
3037 jme_stop_pcc_timer(jme); 3195 jme_stop_pcc_timer(jme);
3038 jme_reset_ghc_speed(jme);
3039 jme_disable_rx_engine(jme); 3196 jme_disable_rx_engine(jme);
3040 jme_disable_tx_engine(jme); 3197 jme_disable_tx_engine(jme);
3041 jme_reset_mac_processor(jme); 3198 jme_reset_mac_processor(jme);
@@ -3066,12 +3223,11 @@ jme_resume(struct pci_dev *pdev)
3066 jme_clear_pm(jme); 3223 jme_clear_pm(jme);
3067 pci_restore_state(pdev); 3224 pci_restore_state(pdev);
3068 3225
3069 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 3226 jme_phy_on(jme);
3070 jme_phy_on(jme); 3227 if (test_bit(JME_FLAG_SSET, &jme->flags))
3071 jme_set_settings(netdev, &jme->old_ecmd); 3228 jme_set_settings(netdev, &jme->old_ecmd);
3072 } else { 3229 else
3073 jme_reset_phy_processor(jme); 3230 jme_reset_phy_processor(jme);
3074 }
3075 3231
3076 jme_start_irq(jme); 3232 jme_start_irq(jme);
3077 netif_device_attach(netdev); 3233 netif_device_attach(netdev);
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index eac09264bf2a..8bf30451e821 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -26,7 +26,7 @@
26#define __JME_H_INCLUDED__ 26#define __JME_H_INCLUDED__
27 27
28#define DRV_NAME "jme" 28#define DRV_NAME "jme"
29#define DRV_VERSION "1.0.7" 29#define DRV_VERSION "1.0.8"
30#define PFX DRV_NAME ": " 30#define PFX DRV_NAME ": "
31 31
32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
103#define HALF_US 500 /* 500 ns */ 103#define HALF_US 500 /* 500 ns */
104#define JMESPIIOCTL SIOCDEVPRIVATE 104#define JMESPIIOCTL SIOCDEVPRIVATE
105 105
106#define PCI_PRIV_PE1 0xE4
107
108enum pci_priv_pe1_bit_masks {
109 PE1_ASPMSUPRT = 0x00000003, /*
110 * RW:
111 * Aspm_support[1:0]
112 * (R/W Port of 5C[11:10])
113 */
114 PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */
115 PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */
116 PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */
117 PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
118 PE1_GPREG0 = 0x0000FF00, /*
119 * SRW:
120 * Cfg_gp_reg0
121 * [7:6] phy_giga BG control
122 * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
123 * [4:0] Reserved
124 */
125 PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */
126 PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */
127 PE1_REVID = 0xFF000000, /* RO: Rev ID */
128};
129
130enum pci_priv_pe1_values {
131 PE1_GPREG0_ENBG = 0x00000000, /* en BG */
132 PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */
133 PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */
134 PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */
135};
136
106/* 137/*
107 * Dynamic(adaptive)/Static PCC values 138 * Dynamic(adaptive)/Static PCC values
108 */ 139 */
@@ -403,6 +434,7 @@ struct jme_adapter {
403 u32 reg_rxmcs; 434 u32 reg_rxmcs;
404 u32 reg_ghc; 435 u32 reg_ghc;
405 u32 reg_pmcs; 436 u32 reg_pmcs;
437 u32 reg_gpreg1;
406 u32 phylink; 438 u32 phylink;
407 u32 tx_ring_size; 439 u32 tx_ring_size;
408 u32 tx_ring_mask; 440 u32 tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
411 u32 rx_ring_mask; 443 u32 rx_ring_mask;
412 u8 mrrs; 444 u8 mrrs;
413 unsigned int fpgaver; 445 unsigned int fpgaver;
414 unsigned int chiprev; 446 u8 chiprev;
415 u8 rev; 447 u8 chip_main_rev;
448 u8 chip_sub_rev;
449 u8 pcirev;
416 u32 msg_enable; 450 u32 msg_enable;
417 struct ethtool_cmd old_ecmd; 451 struct ethtool_cmd old_ecmd;
418 unsigned int old_mtu; 452 unsigned int old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
497 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */ 531 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
498 532
499 533
534 JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
500 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */ 535 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
501 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */ 536 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
502 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */ 537 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
624 TXTRHD_TXRL_SHIFT = 0, 659 TXTRHD_TXRL_SHIFT = 0,
625}; 660};
626 661
662enum jme_txtrhd_values {
663 TXTRHD_FULLDUPLEX = 0x00000000,
664 TXTRHD_HALFDUPLEX = TXTRHD_TXPEN |
665 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
666 TXTRHD_TXREN |
667 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
668};
669
627/* 670/*
628 * RX Control/Status Bits 671 * RX Control/Status Bits
629 */ 672 */
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
779 */ 822 */
780enum jme_ghc_bit_mask { 823enum jme_ghc_bit_mask {
781 GHC_SWRST = 0x40000000, 824 GHC_SWRST = 0x40000000,
825 GHC_TO_CLK_SRC = 0x00C00000,
826 GHC_TXMAC_CLK_SRC = 0x00300000,
782 GHC_DPX = 0x00000040, 827 GHC_DPX = 0x00000040,
783 GHC_SPEED = 0x00000030, 828 GHC_SPEED = 0x00000030,
784 GHC_LINK_POLL = 0x00000001, 829 GHC_LINK_POLL = 0x00000001,
@@ -833,6 +878,21 @@ enum jme_pmcs_bit_masks {
833}; 878};
834 879
835/* 880/*
881 * New PHY Power Control Register
882 */
883enum jme_phy_pwr_bit_masks {
884 PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
885 PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
886 PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
887 PHY_PWR_CLKSEL = 0x08000000, /*
888 * XTL_OUT Clock select
889 * (an internal free-running clock)
890 * 0: xtl_out = phy_giga.A_XTL25_O
891 * 1: xtl_out = phy_giga.PD_OSC
892 */
893};
894
895/*
836 * Giga PHY Status Registers 896 * Giga PHY Status Registers
837 */ 897 */
838enum jme_phy_link_bit_mask { 898enum jme_phy_link_bit_mask {
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
942 1002
943/* 1003/*
944 * General Purpose REG-1 1004 * General Purpose REG-1
945 * Note: All theses bits defined here are for
946 * Chip mode revision 0x11 only
947 */ 1005 */
948enum jme_gpreg1_masks { 1006enum jme_gpreg1_bit_masks {
1007 GPREG1_RXCLKOFF = 0x04000000,
1008 GPREG1_PCREQN = 0x00020000,
1009 GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */
1010 GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */
949 GPREG1_INTRDELAYUNIT = 0x00000018, 1011 GPREG1_INTRDELAYUNIT = 0x00000018,
950 GPREG1_INTRDELAYENABLE = 0x00000007, 1012 GPREG1_INTRDELAYENABLE = 0x00000007,
951}; 1013};
952 1014
953enum jme_gpreg1_vals { 1015enum jme_gpreg1_vals {
954 GPREG1_RSSPATCH = 0x00000040,
955 GPREG1_HALFMODEPATCH = 0x00000020,
956
957 GPREG1_INTDLYUNIT_16NS = 0x00000000, 1016 GPREG1_INTDLYUNIT_16NS = 0x00000000,
958 GPREG1_INTDLYUNIT_256NS = 0x00000008, 1017 GPREG1_INTDLYUNIT_256NS = 0x00000008,
959 GPREG1_INTDLYUNIT_1US = 0x00000010, 1018 GPREG1_INTDLYUNIT_1US = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
967 GPREG1_INTDLYEN_6U = 0x00000006, 1026 GPREG1_INTDLYEN_6U = 0x00000006,
968 GPREG1_INTDLYEN_7U = 0x00000007, 1027 GPREG1_INTDLYEN_7U = 0x00000007,
969 1028
970 GPREG1_DEFAULT = 0x00000000, 1029 GPREG1_DEFAULT = GPREG1_PCREQN,
971}; 1030};
972 1031
973/* 1032/*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
1184/* 1243/*
1185 * Workaround 1244 * Workaround
1186 */ 1245 */
1187static inline int is_buggy250(unsigned short device, unsigned int chiprev) 1246static inline int is_buggy250(unsigned short device, u8 chiprev)
1188{ 1247{
1189 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11; 1248 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
1190} 1249}
1191 1250
1251static inline int new_phy_power_ctrl(u8 chip_main_rev)
1252{
1253 return chip_main_rev >= 5;
1254}
1255
1192/* 1256/*
1193 * Function prototypes 1257 * Function prototypes
1194 */ 1258 */
1195static int jme_set_settings(struct net_device *netdev, 1259static int jme_set_settings(struct net_device *netdev,
1196 struct ethtool_cmd *ecmd); 1260 struct ethtool_cmd *ecmd);
1261static void jme_set_unicastaddr(struct net_device *netdev);
1197static void jme_set_multi(struct net_device *netdev); 1262static void jme_set_multi(struct net_device *netdev);
1198 1263
1199#endif 1264#endif
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2d9663a1c54d..ea0dc451da9c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
129 129
130static const struct ethtool_ops loopback_ethtool_ops = { 130static const struct ethtool_ops loopback_ethtool_ops = {
131 .get_link = always_on, 131 .get_link = always_on,
132 .set_tso = ethtool_op_set_tso,
133 .get_tx_csum = always_on,
134 .get_sg = always_on,
135 .get_rx_csum = always_on,
136}; 132};
137 133
138static int loopback_dev_init(struct net_device *dev) 134static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
169 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 165 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
170 dev->flags = IFF_LOOPBACK; 166 dev->flags = IFF_LOOPBACK;
171 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 167 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
168 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
172 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 169 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
173 | NETIF_F_TSO 170 | NETIF_F_ALL_TSO
171 | NETIF_F_UFO
174 | NETIF_F_NO_CSUM 172 | NETIF_F_NO_CSUM
173 | NETIF_F_RXCSUM
175 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
176 | NETIF_F_LLTX 175 | NETIF_F_LLTX
177 | NETIF_F_NETNS_LOCAL; 176 | NETIF_F_NETNS_LOCAL;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6ed577b065df..5b37d3c191e4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -152,9 +152,10 @@ static void macvlan_broadcast(struct sk_buff *skb,
152} 152}
153 153
154/* called under rcu_read_lock() from netif_receive_skb */ 154/* called under rcu_read_lock() from netif_receive_skb */
155static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) 155static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
156{ 156{
157 struct macvlan_port *port; 157 struct macvlan_port *port;
158 struct sk_buff *skb = *pskb;
158 const struct ethhdr *eth = eth_hdr(skb); 159 const struct ethhdr *eth = eth_hdr(skb);
159 const struct macvlan_dev *vlan; 160 const struct macvlan_dev *vlan;
160 const struct macvlan_dev *src; 161 const struct macvlan_dev *src;
@@ -184,7 +185,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
184 */ 185 */
185 macvlan_broadcast(skb, port, src->dev, 186 macvlan_broadcast(skb, port, src->dev,
186 MACVLAN_MODE_VEPA); 187 MACVLAN_MODE_VEPA);
187 return skb; 188 return RX_HANDLER_PASS;
188 } 189 }
189 190
190 if (port->passthru) 191 if (port->passthru)
@@ -192,12 +193,12 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
192 else 193 else
193 vlan = macvlan_hash_lookup(port, eth->h_dest); 194 vlan = macvlan_hash_lookup(port, eth->h_dest);
194 if (vlan == NULL) 195 if (vlan == NULL)
195 return skb; 196 return RX_HANDLER_PASS;
196 197
197 dev = vlan->dev; 198 dev = vlan->dev;
198 if (unlikely(!(dev->flags & IFF_UP))) { 199 if (unlikely(!(dev->flags & IFF_UP))) {
199 kfree_skb(skb); 200 kfree_skb(skb);
200 return NULL; 201 return RX_HANDLER_CONSUMED;
201 } 202 }
202 len = skb->len + ETH_HLEN; 203 len = skb->len + ETH_HLEN;
203 skb = skb_share_check(skb, GFP_ATOMIC); 204 skb = skb_share_check(skb, GFP_ATOMIC);
@@ -211,7 +212,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
211 212
212out: 213out:
213 macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); 214 macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
214 return NULL; 215 return RX_HANDLER_CONSUMED;
215} 216}
216 217
217static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 218static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -219,9 +220,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
219 const struct macvlan_dev *vlan = netdev_priv(dev); 220 const struct macvlan_dev *vlan = netdev_priv(dev);
220 const struct macvlan_port *port = vlan->port; 221 const struct macvlan_port *port = vlan->port;
221 const struct macvlan_dev *dest; 222 const struct macvlan_dev *dest;
223 __u8 ip_summed = skb->ip_summed;
222 224
223 if (vlan->mode == MACVLAN_MODE_BRIDGE) { 225 if (vlan->mode == MACVLAN_MODE_BRIDGE) {
224 const struct ethhdr *eth = (void *)skb->data; 226 const struct ethhdr *eth = (void *)skb->data;
227 skb->ip_summed = CHECKSUM_UNNECESSARY;
225 228
226 /* send to other bridge ports directly */ 229 /* send to other bridge ports directly */
227 if (is_multicast_ether_addr(eth->h_dest)) { 230 if (is_multicast_ether_addr(eth->h_dest)) {
@@ -241,6 +244,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
241 } 244 }
242 245
243xmit_world: 246xmit_world:
247 skb->ip_summed = ip_summed;
244 skb_set_dev(skb, vlan->lowerdev); 248 skb_set_dev(skb, vlan->lowerdev);
245 return dev_queue_xmit(skb); 249 return dev_queue_xmit(skb);
246} 250}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index fc27a9926d9e..6696e56e6320 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@ struct macvtap_queue {
39 struct socket sock; 39 struct socket sock;
40 struct socket_wq wq; 40 struct socket_wq wq;
41 int vnet_hdr_sz; 41 int vnet_hdr_sz;
42 struct macvlan_dev *vlan; 42 struct macvlan_dev __rcu *vlan;
43 struct file *file; 43 struct file *file;
44 unsigned int flags; 44 unsigned int flags;
45}; 45};
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
141 struct macvlan_dev *vlan; 141 struct macvlan_dev *vlan;
142 142
143 spin_lock(&macvtap_lock); 143 spin_lock(&macvtap_lock);
144 vlan = rcu_dereference(q->vlan); 144 vlan = rcu_dereference_protected(q->vlan,
145 lockdep_is_held(&macvtap_lock));
145 if (vlan) { 146 if (vlan) {
146 int index = get_slot(vlan, q); 147 int index = get_slot(vlan, q);
147 148
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
219 /* macvtap_put_queue can free some slots, so go through all slots */ 220 /* macvtap_put_queue can free some slots, so go through all slots */
220 spin_lock(&macvtap_lock); 221 spin_lock(&macvtap_lock);
221 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) { 222 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
222 q = rcu_dereference(vlan->taps[i]); 223 q = rcu_dereference_protected(vlan->taps[i],
224 lockdep_is_held(&macvtap_lock));
223 if (q) { 225 if (q) {
224 qlist[j++] = q; 226 qlist[j++] = q;
225 rcu_assign_pointer(vlan->taps[i], NULL); 227 rcu_assign_pointer(vlan->taps[i], NULL);
@@ -570,7 +572,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
570 } 572 }
571 573
572 rcu_read_lock_bh(); 574 rcu_read_lock_bh();
573 vlan = rcu_dereference(q->vlan); 575 vlan = rcu_dereference_bh(q->vlan);
574 if (vlan) 576 if (vlan)
575 macvlan_start_xmit(skb, vlan->dev); 577 macvlan_start_xmit(skb, vlan->dev);
576 else 578 else
@@ -584,7 +586,7 @@ err_kfree:
584 586
585err: 587err:
586 rcu_read_lock_bh(); 588 rcu_read_lock_bh();
587 vlan = rcu_dereference(q->vlan); 589 vlan = rcu_dereference_bh(q->vlan);
588 if (vlan) 590 if (vlan)
589 vlan->dev->stats.tx_dropped++; 591 vlan->dev->stats.tx_dropped++;
590 rcu_read_unlock_bh(); 592 rcu_read_unlock_bh();
@@ -632,7 +634,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
632 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len); 634 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
633 635
634 rcu_read_lock_bh(); 636 rcu_read_lock_bh();
635 vlan = rcu_dereference(q->vlan); 637 vlan = rcu_dereference_bh(q->vlan);
636 if (vlan) 638 if (vlan)
637 macvlan_count_rx(vlan, len, ret == 0, 0); 639 macvlan_count_rx(vlan, len, ret == 0, 0);
638 rcu_read_unlock_bh(); 640 rcu_read_unlock_bh();
@@ -728,7 +730,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
728 730
729 case TUNGETIFF: 731 case TUNGETIFF:
730 rcu_read_lock_bh(); 732 rcu_read_lock_bh();
731 vlan = rcu_dereference(q->vlan); 733 vlan = rcu_dereference_bh(q->vlan);
732 if (vlan) 734 if (vlan)
733 dev_hold(vlan->dev); 735 dev_hold(vlan->dev);
734 rcu_read_unlock_bh(); 736 rcu_read_unlock_bh();
@@ -737,7 +739,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
737 return -ENOLINK; 739 return -ENOLINK;
738 740
739 ret = 0; 741 ret = 0;
740 if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) || 742 if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
741 put_user(q->flags, &ifr->ifr_flags)) 743 put_user(q->flags, &ifr->ifr_flags))
742 ret = -EFAULT; 744 ret = -EFAULT;
743 dev_put(vlan->dev); 745 dev_put(vlan->dev);
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 210b2b164b30..0a6c6a2e7550 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -354,7 +354,7 @@ unsigned int mii_check_media (struct mii_if_info *mii,
354 if (!new_carrier) { 354 if (!new_carrier) {
355 netif_carrier_off(mii->dev); 355 netif_carrier_off(mii->dev);
356 if (ok_to_print) 356 if (ok_to_print)
357 printk(KERN_INFO "%s: link down\n", mii->dev->name); 357 netdev_info(mii->dev, "link down\n");
358 return 0; /* duplex did not change */ 358 return 0; /* duplex did not change */
359 } 359 }
360 360
@@ -381,12 +381,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
381 duplex = 1; 381 duplex = 1;
382 382
383 if (ok_to_print) 383 if (ok_to_print)
384 printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n", 384 netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
385 mii->dev->name, 385 lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
386 lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" : 386 media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
387 media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10", 387 100 : 10,
388 duplex ? "full" : "half", 388 duplex ? "full" : "half",
389 lpa); 389 lpa);
390 390
391 if ((init_media) || (mii->full_duplex != duplex)) { 391 if ((init_media) || (mii->full_duplex != duplex)) {
392 mii->full_duplex = duplex; 392 mii->full_duplex = duplex;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 02076e16542a..34425b94452f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -35,6 +35,8 @@
35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 */ 36 */
37 37
38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
38#include <linux/init.h> 40#include <linux/init.h>
39#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
40#include <linux/in.h> 42#include <linux/in.h>
@@ -627,9 +629,8 @@ err:
627 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 629 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
628 (RX_FIRST_DESC | RX_LAST_DESC)) { 630 (RX_FIRST_DESC | RX_LAST_DESC)) {
629 if (net_ratelimit()) 631 if (net_ratelimit())
630 dev_printk(KERN_ERR, &mp->dev->dev, 632 netdev_err(mp->dev,
631 "received packet spanning " 633 "received packet spanning multiple descriptors\n");
632 "multiple descriptors\n");
633 } 634 }
634 635
635 if (cmd_sts & ERROR_SUMMARY) 636 if (cmd_sts & ERROR_SUMMARY)
@@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
868 869
869 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 870 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
870 txq->tx_dropped++; 871 txq->tx_dropped++;
871 dev_printk(KERN_DEBUG, &dev->dev, 872 netdev_printk(KERN_DEBUG, dev,
872 "failed to linearize skb with tiny " 873 "failed to linearize skb with tiny unaligned fragment\n");
873 "unaligned fragment\n");
874 return NETDEV_TX_BUSY; 874 return NETDEV_TX_BUSY;
875 } 875 }
876 876
877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
878 if (net_ratelimit()) 878 if (net_ratelimit())
879 dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n"); 879 netdev_err(dev, "tx queue full?!\n");
880 kfree_skb(skb); 880 kfree_skb(skb);
881 return NETDEV_TX_OK; 881 return NETDEV_TX_OK;
882 } 882 }
@@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
959 skb = __skb_dequeue(&txq->tx_skb); 959 skb = __skb_dequeue(&txq->tx_skb);
960 960
961 if (cmd_sts & ERROR_SUMMARY) { 961 if (cmd_sts & ERROR_SUMMARY) {
962 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 962 netdev_info(mp->dev, "tx error\n");
963 mp->dev->stats.tx_errors++; 963 mp->dev->stats.tx_errors++;
964 } 964 }
965 965
@@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1122 int ret; 1122 int ret;
1123 1123
1124 if (smi_wait_ready(msp)) { 1124 if (smi_wait_ready(msp)) {
1125 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1125 pr_warn("SMI bus busy timeout\n");
1126 return -ETIMEDOUT; 1126 return -ETIMEDOUT;
1127 } 1127 }
1128 1128
1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1130 1130
1131 if (smi_wait_ready(msp)) { 1131 if (smi_wait_ready(msp)) {
1132 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1132 pr_warn("SMI bus busy timeout\n");
1133 return -ETIMEDOUT; 1133 return -ETIMEDOUT;
1134 } 1134 }
1135 1135
1136 ret = readl(smi_reg); 1136 ret = readl(smi_reg);
1137 if (!(ret & SMI_READ_VALID)) { 1137 if (!(ret & SMI_READ_VALID)) {
1138 printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n"); 1138 pr_warn("SMI bus read not valid\n");
1139 return -ENODEV; 1139 return -ENODEV;
1140 } 1140 }
1141 1141
@@ -1148,7 +1148,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1148 void __iomem *smi_reg = msp->base + SMI_REG; 1148 void __iomem *smi_reg = msp->base + SMI_REG;
1149 1149
1150 if (smi_wait_ready(msp)) { 1150 if (smi_wait_ready(msp)) {
1151 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1151 pr_warn("SMI bus busy timeout\n");
1152 return -ETIMEDOUT; 1152 return -ETIMEDOUT;
1153 } 1153 }
1154 1154
@@ -1156,7 +1156,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1156 (addr << 16) | (val & 0xffff), smi_reg); 1156 (addr << 16) | (val & 0xffff), smi_reg);
1157 1157
1158 if (smi_wait_ready(msp)) { 1158 if (smi_wait_ready(msp)) {
1159 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1159 pr_warn("SMI bus busy timeout\n");
1160 return -ETIMEDOUT; 1160 return -ETIMEDOUT;
1161 } 1161 }
1162 1162
@@ -1566,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1566 if (netif_running(dev)) { 1566 if (netif_running(dev)) {
1567 mv643xx_eth_stop(dev); 1567 mv643xx_eth_stop(dev);
1568 if (mv643xx_eth_open(dev)) { 1568 if (mv643xx_eth_open(dev)) {
1569 dev_printk(KERN_ERR, &dev->dev, 1569 netdev_err(dev,
1570 "fatal error on re-opening device after " 1570 "fatal error on re-opening device after ring param change\n");
1571 "ring param change\n");
1572 return -ENOMEM; 1571 return -ENOMEM;
1573 } 1572 }
1574 } 1573 }
@@ -1874,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1874 } 1873 }
1875 1874
1876 if (rxq->rx_desc_area == NULL) { 1875 if (rxq->rx_desc_area == NULL) {
1877 dev_printk(KERN_ERR, &mp->dev->dev, 1876 netdev_err(mp->dev,
1878 "can't allocate rx ring (%d bytes)\n", size); 1877 "can't allocate rx ring (%d bytes)\n", size);
1879 goto out; 1878 goto out;
1880 } 1879 }
@@ -1884,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1884 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), 1883 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
1885 GFP_KERNEL); 1884 GFP_KERNEL);
1886 if (rxq->rx_skb == NULL) { 1885 if (rxq->rx_skb == NULL) {
1887 dev_printk(KERN_ERR, &mp->dev->dev, 1886 netdev_err(mp->dev, "can't allocate rx skb ring\n");
1888 "can't allocate rx skb ring\n");
1889 goto out_free; 1887 goto out_free;
1890 } 1888 }
1891 1889
@@ -1944,8 +1942,7 @@ static void rxq_deinit(struct rx_queue *rxq)
1944 } 1942 }
1945 1943
1946 if (rxq->rx_desc_count) { 1944 if (rxq->rx_desc_count) {
1947 dev_printk(KERN_ERR, &mp->dev->dev, 1945 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
1948 "error freeing rx ring -- %d skbs stuck\n",
1949 rxq->rx_desc_count); 1946 rxq->rx_desc_count);
1950 } 1947 }
1951 1948
@@ -1987,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1987 } 1984 }
1988 1985
1989 if (txq->tx_desc_area == NULL) { 1986 if (txq->tx_desc_area == NULL) {
1990 dev_printk(KERN_ERR, &mp->dev->dev, 1987 netdev_err(mp->dev,
1991 "can't allocate tx ring (%d bytes)\n", size); 1988 "can't allocate tx ring (%d bytes)\n", size);
1992 return -ENOMEM; 1989 return -ENOMEM;
1993 } 1990 }
@@ -2093,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
2093 if (netif_carrier_ok(dev)) { 2090 if (netif_carrier_ok(dev)) {
2094 int i; 2091 int i;
2095 2092
2096 printk(KERN_INFO "%s: link down\n", dev->name); 2093 netdev_info(dev, "link down\n");
2097 2094
2098 netif_carrier_off(dev); 2095 netif_carrier_off(dev);
2099 2096
@@ -2124,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
2124 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2121 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2125 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2122 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2126 2123
2127 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " 2124 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2128 "flow control %sabled\n", dev->name, 2125 speed, duplex ? "full" : "half", fc ? "en" : "dis");
2129 speed, duplex ? "full" : "half",
2130 fc ? "en" : "dis");
2131 2126
2132 if (!netif_carrier_ok(dev)) 2127 if (!netif_carrier_ok(dev))
2133 netif_carrier_on(dev); 2128 netif_carrier_on(dev);
@@ -2337,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2337 err = request_irq(dev->irq, mv643xx_eth_irq, 2332 err = request_irq(dev->irq, mv643xx_eth_irq,
2338 IRQF_SHARED, dev->name, dev); 2333 IRQF_SHARED, dev->name, dev);
2339 if (err) { 2334 if (err) {
2340 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2335 netdev_err(dev, "can't assign irq\n");
2341 return -EAGAIN; 2336 return -EAGAIN;
2342 } 2337 }
2343 2338
@@ -2483,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2483 */ 2478 */
2484 mv643xx_eth_stop(dev); 2479 mv643xx_eth_stop(dev);
2485 if (mv643xx_eth_open(dev)) { 2480 if (mv643xx_eth_open(dev)) {
2486 dev_printk(KERN_ERR, &dev->dev, 2481 netdev_err(dev,
2487 "fatal error on re-opening device after " 2482 "fatal error on re-opening device after MTU change\n");
2488 "MTU change\n");
2489 } 2483 }
2490 2484
2491 return 0; 2485 return 0;
@@ -2508,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
2508{ 2502{
2509 struct mv643xx_eth_private *mp = netdev_priv(dev); 2503 struct mv643xx_eth_private *mp = netdev_priv(dev);
2510 2504
2511 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); 2505 netdev_info(dev, "tx timeout\n");
2512 2506
2513 schedule_work(&mp->tx_timeout_task); 2507 schedule_work(&mp->tx_timeout_task);
2514} 2508}
@@ -2603,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2603 int ret; 2597 int ret;
2604 2598
2605 if (!mv643xx_eth_version_printed++) 2599 if (!mv643xx_eth_version_printed++)
2606 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " 2600 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2607 "driver version %s\n", mv643xx_eth_driver_version); 2601 mv643xx_eth_driver_version);
2608 2602
2609 ret = -EINVAL; 2603 ret = -EINVAL;
2610 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2604 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2871,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2871 2865
2872 pd = pdev->dev.platform_data; 2866 pd = pdev->dev.platform_data;
2873 if (pd == NULL) { 2867 if (pd == NULL) {
2874 dev_printk(KERN_ERR, &pdev->dev, 2868 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
2875 "no mv643xx_eth_platform_data\n");
2876 return -ENODEV; 2869 return -ENODEV;
2877 } 2870 }
2878 2871
2879 if (pd->shared == NULL) { 2872 if (pd->shared == NULL) {
2880 dev_printk(KERN_ERR, &pdev->dev, 2873 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
2881 "no mv643xx_eth_platform_data->shared\n");
2882 return -ENODEV; 2874 return -ENODEV;
2883 } 2875 }
2884 2876
@@ -2957,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2957 if (err) 2949 if (err)
2958 goto out; 2950 goto out;
2959 2951
2960 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", 2952 netdev_notice(dev, "port %d with MAC address %pM\n",
2961 mp->port_num, dev->dev_addr); 2953 mp->port_num, dev->dev_addr);
2962 2954
2963 if (mp->tx_desc_sram_size > 0) 2955 if (mp->tx_desc_sram_size > 0)
2964 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2956 netdev_notice(dev, "configured with sram\n");
2965 2957
2966 return 0; 2958 return 0;
2967 2959
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ea5cfe2c3a04..a7f2eed9a08a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@ struct myri10ge_priv {
253 unsigned long serial_number; 253 unsigned long serial_number;
254 int vendor_specific_offset; 254 int vendor_specific_offset;
255 int fw_multicast_support; 255 int fw_multicast_support;
256 unsigned long features; 256 u32 features;
257 u32 max_tso6; 257 u32 max_tso6;
258 u32 read_dma; 258 u32 read_dma;
259 u32 write_dma; 259 u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
1776static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) 1776static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
1777{ 1777{
1778 struct myri10ge_priv *mgp = netdev_priv(netdev); 1778 struct myri10ge_priv *mgp = netdev_priv(netdev);
1779 unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO); 1779 u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
1780 1780
1781 if (tso_enabled) 1781 if (tso_enabled)
1782 netdev->features |= flags; 1782 netdev->features |= flags;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index a11380544e6c..d7299f1a4940 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -739,7 +739,8 @@ struct netxen_recv_context {
739#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c 739#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
740#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d 740#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
741#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e 741#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
742#define NX_CDRP_CMD_MAX 0x0000001f 742#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f
743#define NX_CDRP_CMD_MAX 0x00000020
743 744
744#define NX_RCODE_SUCCESS 0 745#define NX_RCODE_SUCCESS 0
745#define NX_RCODE_NO_HOST_MEM 1 746#define NX_RCODE_NO_HOST_MEM 1
@@ -1054,6 +1055,7 @@ typedef struct {
1054#define NX_FW_CAPABILITY_BDG (1 << 8) 1055#define NX_FW_CAPABILITY_BDG (1 << 8)
1055#define NX_FW_CAPABILITY_FVLANTX (1 << 9) 1056#define NX_FW_CAPABILITY_FVLANTX (1 << 9)
1056#define NX_FW_CAPABILITY_HW_LRO (1 << 10) 1057#define NX_FW_CAPABILITY_HW_LRO (1 << 10)
1058#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11)
1057 1059
1058/* module types */ 1060/* module types */
1059#define LINKEVENT_MODULE_NOT_PRESENT 1 1061#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -1349,6 +1351,8 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
1349void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *); 1351void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
1350void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64); 1352void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
1351 1353
1354int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
1355 u32 speed, u32 duplex, u32 autoneg);
1352int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); 1356int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
1353int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1357int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
1354int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable); 1358int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index f7d06cbc70ae..f16966afa64e 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -112,6 +112,21 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
112 return 0; 112 return 0;
113} 113}
114 114
115int
116nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
117 u32 speed, u32 duplex, u32 autoneg)
118{
119
120 return netxen_issue_cmd(adapter,
121 adapter->ahw.pci_func,
122 NXHAL_VERSION,
123 speed,
124 duplex,
125 autoneg,
126 NX_CDRP_CMD_CONFIG_GBE_PORT);
127
128}
129
115static int 130static int
116nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) 131nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
117{ 132{
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 587498e140bb..653d308e0f5d 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -214,7 +214,6 @@ skip:
214 check_sfp_module = netif_running(dev) && 214 check_sfp_module = netif_running(dev) &&
215 adapter->has_link_events; 215 adapter->has_link_events;
216 } else { 216 } else {
217 ecmd->autoneg = AUTONEG_ENABLE;
218 ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); 217 ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
219 ecmd->advertising |= 218 ecmd->advertising |=
220 (ADVERTISED_TP | ADVERTISED_Autoneg); 219 (ADVERTISED_TP | ADVERTISED_Autoneg);
@@ -252,53 +251,24 @@ static int
252netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 251netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
253{ 252{
254 struct netxen_adapter *adapter = netdev_priv(dev); 253 struct netxen_adapter *adapter = netdev_priv(dev);
255 __u32 status; 254 int ret;
256 255
257 /* read which mode */ 256 if (adapter->ahw.port_type != NETXEN_NIC_GBE)
258 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 257 return -EOPNOTSUPP;
259 /* autonegotiation */
260 if (adapter->phy_write &&
261 adapter->phy_write(adapter,
262 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
263 ecmd->autoneg) != 0)
264 return -EIO;
265 else
266 adapter->link_autoneg = ecmd->autoneg;
267 258
268 if (adapter->phy_read && 259 if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
269 adapter->phy_read(adapter, 260 return -EOPNOTSUPP;
270 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
271 &status) != 0)
272 return -EIO;
273 261
274 /* speed */ 262 ret = nx_fw_cmd_set_gbe_port(adapter, ecmd->speed, ecmd->duplex,
275 switch (ecmd->speed) { 263 ecmd->autoneg);
276 case SPEED_10: 264 if (ret == NX_RCODE_NOT_SUPPORTED)
277 netxen_set_phy_speed(status, 0);
278 break;
279 case SPEED_100:
280 netxen_set_phy_speed(status, 1);
281 break;
282 case SPEED_1000:
283 netxen_set_phy_speed(status, 2);
284 break;
285 }
286 /* set duplex mode */
287 if (ecmd->duplex == DUPLEX_HALF)
288 netxen_clear_phy_duplex(status);
289 if (ecmd->duplex == DUPLEX_FULL)
290 netxen_set_phy_duplex(status);
291 if (adapter->phy_write &&
292 adapter->phy_write(adapter,
293 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
294 *((int *)&status)) != 0)
295 return -EIO;
296 else {
297 adapter->link_speed = ecmd->speed;
298 adapter->link_duplex = ecmd->duplex;
299 }
300 } else
301 return -EOPNOTSUPP; 265 return -EOPNOTSUPP;
266 else if (ret)
267 return -EIO;
268
269 adapter->link_speed = ecmd->speed;
270 adapter->link_duplex = ecmd->duplex;
271 adapter->link_autoneg = ecmd->autoneg;
302 272
303 if (!netif_running(dev)) 273 if (!netif_running(dev))
304 return 0; 274 return 0;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 33fac32e0d9f..83348dc4b184 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1032,6 +1032,9 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
1032 netif_carrier_off(netdev); 1032 netif_carrier_off(netdev);
1033 netif_tx_disable(netdev); 1033 netif_tx_disable(netdev);
1034 1034
1035 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
1036 netxen_linkevent_request(adapter, 0);
1037
1035 if (adapter->stop_port) 1038 if (adapter->stop_port)
1036 adapter->stop_port(adapter); 1039 adapter->stop_port(adapter);
1037 1040
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 35fda5ac8120..392a6c4b72e5 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -77,7 +77,6 @@ config NATIONAL_PHY
77 Currently supports the DP83865 PHY. 77 Currently supports the DP83865 PHY.
78 78
79config STE10XP 79config STE10XP
80 depends on PHYLIB
81 tristate "Driver for STMicroelectronics STe10Xp PHYs" 80 tristate "Driver for STMicroelectronics STe10Xp PHYs"
82 ---help--- 81 ---help---
83 This is the driver for the STe100p and STe101p PHYs. 82 This is the driver for the STe100p and STe101p PHYs.
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0fd1678bc5a9..590f902deb6b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,13 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/phy.h> 21#include <linux/phy.h>
22 22#include <linux/micrel_phy.h>
23#define PHY_ID_KSZ9021 0x00221611
24#define PHY_ID_KS8737 0x00221720
25#define PHY_ID_KS8041 0x00221510
26#define PHY_ID_KS8051 0x00221550
27/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
28#define PHY_ID_KS8001 0x0022161A
29 23
30/* general Interrupt control/status reg in vendor specific block. */ 24/* general Interrupt control/status reg in vendor specific block. */
31#define MII_KSZPHY_INTCS 0x1B 25#define MII_KSZPHY_INTCS 0x1B
@@ -46,6 +40,7 @@
46#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9) 40#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
47#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14) 41#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
48#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) 42#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
43#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
49 44
50static int kszphy_ack_interrupt(struct phy_device *phydev) 45static int kszphy_ack_interrupt(struct phy_device *phydev)
51{ 46{
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
106 return 0; 101 return 0;
107} 102}
108 103
104static int ks8051_config_init(struct phy_device *phydev)
105{
106 int regval;
107
108 if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
109 regval = phy_read(phydev, MII_KSZPHY_CTRL);
110 regval |= KSZ8051_RMII_50MHZ_CLK;
111 phy_write(phydev, MII_KSZPHY_CTRL, regval);
112 }
113
114 return 0;
115}
116
109static struct phy_driver ks8737_driver = { 117static struct phy_driver ks8737_driver = {
110 .phy_id = PHY_ID_KS8737, 118 .phy_id = PHY_ID_KS8737,
111 .phy_id_mask = 0x00fffff0, 119 .phy_id_mask = 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
142 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause 150 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
143 | SUPPORTED_Asym_Pause), 151 | SUPPORTED_Asym_Pause),
144 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 152 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
145 .config_init = kszphy_config_init, 153 .config_init = ks8051_config_init,
146 .config_aneg = genphy_config_aneg, 154 .config_aneg = genphy_config_aneg,
147 .read_status = genphy_read_status, 155 .read_status = genphy_read_status,
148 .ack_interrupt = kszphy_ack_interrupt, 156 .ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a8445c72fc13..f7670330f988 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -319,7 +319,8 @@ int phy_mii_ioctl(struct phy_device *phydev,
319 /* fall through */ 319 /* fall through */
320 320
321 case SIOCGMIIREG: 321 case SIOCGMIIREG:
322 mii_data->val_out = phy_read(phydev, mii_data->reg_num); 322 mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
323 mii_data->reg_num);
323 break; 324 break;
324 325
325 case SIOCSMIIREG: 326 case SIOCSMIIREG:
@@ -350,8 +351,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
350 } 351 }
351 } 352 }
352 353
353 phy_write(phydev, mii_data->reg_num, val); 354 mdiobus_write(phydev->bus, mii_data->phy_id,
354 355 mii_data->reg_num, val);
356
355 if (mii_data->reg_num == MII_BMCR && 357 if (mii_data->reg_num == MII_BMCR &&
356 val & BMCR_RESET && 358 val & BMCR_RESET &&
357 phydev->drv->config_init) { 359 phydev->drv->config_init) {
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index c7a6c4466978..9f6d670748d1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
592 ppp_release(NULL, file); 592 ppp_release(NULL, file);
593 err = 0; 593 err = 0;
594 } else 594 } else
595 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", 595 pr_warn("PPPIOCDETACH file->f_count=%ld\n",
596 atomic_long_read(&file->f_count)); 596 atomic_long_read(&file->f_count));
597 mutex_unlock(&ppp_mutex); 597 mutex_unlock(&ppp_mutex);
598 return err; 598 return err;
599 } 599 }
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
630 630
631 if (pf->kind != INTERFACE) { 631 if (pf->kind != INTERFACE) {
632 /* can't happen */ 632 /* can't happen */
633 printk(KERN_ERR "PPP: not interface or channel??\n"); 633 pr_err("PPP: not interface or channel??\n");
634 return -EINVAL; 634 return -EINVAL;
635 } 635 }
636 636
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
704 } 704 }
705 vj = slhc_init(val2+1, val+1); 705 vj = slhc_init(val2+1, val+1);
706 if (!vj) { 706 if (!vj) {
707 printk(KERN_ERR "PPP: no memory (VJ compressor)\n"); 707 netdev_err(ppp->dev,
708 "PPP: no memory (VJ compressor)\n");
708 err = -ENOMEM; 709 err = -ENOMEM;
709 break; 710 break;
710 } 711 }
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
898{ 899{
899 int err; 900 int err;
900 901
901 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); 902 pr_info("PPP generic driver version " PPP_VERSION "\n");
902 903
903 err = register_pernet_device(&ppp_net_ops); 904 err = register_pernet_device(&ppp_net_ops);
904 if (err) { 905 if (err) {
905 printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err); 906 pr_err("failed to register PPP pernet device (%d)\n", err);
906 goto out; 907 goto out;
907 } 908 }
908 909
909 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 910 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
910 if (err) { 911 if (err) {
911 printk(KERN_ERR "failed to register PPP device (%d)\n", err); 912 pr_err("failed to register PPP device (%d)\n", err);
912 goto out_net; 913 goto out_net;
913 } 914 }
914 915
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1078 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); 1079 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1079 if (!new_skb) { 1080 if (!new_skb) {
1080 if (net_ratelimit()) 1081 if (net_ratelimit())
1081 printk(KERN_ERR "PPP: no memory (comp pkt)\n"); 1082 netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1082 return NULL; 1083 return NULL;
1083 } 1084 }
1084 if (ppp->dev->hard_header_len > PPP_HDRLEN) 1085 if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1108 * the same number. 1109 * the same number.
1109 */ 1110 */
1110 if (net_ratelimit()) 1111 if (net_ratelimit())
1111 printk(KERN_ERR "ppp: compressor dropped pkt\n"); 1112 netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1112 kfree_skb(skb); 1113 kfree_skb(skb);
1113 kfree_skb(new_skb); 1114 kfree_skb(new_skb);
1114 new_skb = NULL; 1115 new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1138 if (ppp->pass_filter && 1139 if (ppp->pass_filter &&
1139 sk_run_filter(skb, ppp->pass_filter) == 0) { 1140 sk_run_filter(skb, ppp->pass_filter) == 0) {
1140 if (ppp->debug & 1) 1141 if (ppp->debug & 1)
1141 printk(KERN_DEBUG "PPP: outbound frame not passed\n"); 1142 netdev_printk(KERN_DEBUG, ppp->dev,
1143 "PPP: outbound frame "
1144 "not passed\n");
1142 kfree_skb(skb); 1145 kfree_skb(skb);
1143 return; 1146 return;
1144 } 1147 }
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1164 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, 1167 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1165 GFP_ATOMIC); 1168 GFP_ATOMIC);
1166 if (!new_skb) { 1169 if (!new_skb) {
1167 printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n"); 1170 netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1168 goto drop; 1171 goto drop;
1169 } 1172 }
1170 skb_reserve(new_skb, ppp->dev->hard_header_len - 2); 1173 skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1202 proto != PPP_LCP && proto != PPP_CCP) { 1205 proto != PPP_LCP && proto != PPP_CCP) {
1203 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { 1206 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1204 if (net_ratelimit()) 1207 if (net_ratelimit())
1205 printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n"); 1208 netdev_err(ppp->dev,
1209 "ppp: compression required but "
1210 "down - pkt dropped.\n");
1206 goto drop; 1211 goto drop;
1207 } 1212 }
1208 skb = pad_compress_skb(ppp, skb); 1213 skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1505 noskb: 1510 noskb:
1506 spin_unlock_bh(&pch->downl); 1511 spin_unlock_bh(&pch->downl);
1507 if (ppp->debug & 1) 1512 if (ppp->debug & 1)
1508 printk(KERN_ERR "PPP: no memory (fragment)\n"); 1513 netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
1509 ++ppp->dev->stats.tx_errors; 1514 ++ppp->dev->stats.tx_errors;
1510 ++ppp->nxseq; 1515 ++ppp->nxseq;
1511 return 1; /* abandon the frame */ 1516 return 1; /* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1686 /* copy to a new sk_buff with more tailroom */ 1691 /* copy to a new sk_buff with more tailroom */
1687 ns = dev_alloc_skb(skb->len + 128); 1692 ns = dev_alloc_skb(skb->len + 128);
1688 if (!ns) { 1693 if (!ns) {
1689 printk(KERN_ERR"PPP: no memory (VJ decomp)\n"); 1694 netdev_err(ppp->dev, "PPP: no memory "
1695 "(VJ decomp)\n");
1690 goto err; 1696 goto err;
1691 } 1697 }
1692 skb_reserve(ns, 2); 1698 skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1699 1705
1700 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); 1706 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
1701 if (len <= 0) { 1707 if (len <= 0) {
1702 printk(KERN_DEBUG "PPP: VJ decompression error\n"); 1708 netdev_printk(KERN_DEBUG, ppp->dev,
1709 "PPP: VJ decompression error\n");
1703 goto err; 1710 goto err;
1704 } 1711 }
1705 len += 2; 1712 len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1721 goto err; 1728 goto err;
1722 1729
1723 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { 1730 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
1724 printk(KERN_ERR "PPP: VJ uncompressed error\n"); 1731 netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
1725 goto err; 1732 goto err;
1726 } 1733 }
1727 proto = PPP_IP; 1734 proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1762 if (ppp->pass_filter && 1769 if (ppp->pass_filter &&
1763 sk_run_filter(skb, ppp->pass_filter) == 0) { 1770 sk_run_filter(skb, ppp->pass_filter) == 0) {
1764 if (ppp->debug & 1) 1771 if (ppp->debug & 1)
1765 printk(KERN_DEBUG "PPP: inbound frame " 1772 netdev_printk(KERN_DEBUG, ppp->dev,
1766 "not passed\n"); 1773 "PPP: inbound frame "
1774 "not passed\n");
1767 kfree_skb(skb); 1775 kfree_skb(skb);
1768 return; 1776 return;
1769 } 1777 }
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1821 1829
1822 ns = dev_alloc_skb(obuff_size); 1830 ns = dev_alloc_skb(obuff_size);
1823 if (!ns) { 1831 if (!ns) {
1824 printk(KERN_ERR "ppp_decompress_frame: no memory\n"); 1832 netdev_err(ppp->dev, "ppp_decompress_frame: "
1833 "no memory\n");
1825 goto err; 1834 goto err;
1826 } 1835 }
1827 /* the decompressor still expects the A/C bytes in the hdr */ 1836 /* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
1989 u32 seq = ppp->nextseq; 1998 u32 seq = ppp->nextseq;
1990 u32 minseq = ppp->minseq; 1999 u32 minseq = ppp->minseq;
1991 struct sk_buff_head *list = &ppp->mrq; 2000 struct sk_buff_head *list = &ppp->mrq;
1992 struct sk_buff *p, *next; 2001 struct sk_buff *p, *tmp;
1993 struct sk_buff *head, *tail; 2002 struct sk_buff *head, *tail;
1994 struct sk_buff *skb = NULL; 2003 struct sk_buff *skb = NULL;
1995 int lost = 0, len = 0; 2004 int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
1998 return NULL; 2007 return NULL;
1999 head = list->next; 2008 head = list->next;
2000 tail = NULL; 2009 tail = NULL;
2001 for (p = head; p != (struct sk_buff *) list; p = next) { 2010 skb_queue_walk_safe(list, p, tmp) {
2002 next = p->next; 2011 again:
2003 if (seq_before(PPP_MP_CB(p)->sequence, seq)) { 2012 if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2004 /* this can't happen, anyway ignore the skb */ 2013 /* this can't happen, anyway ignore the skb */
2005 printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", 2014 netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2006 PPP_MP_CB(p)->sequence, seq); 2015 "seq %u < %u\n",
2007 head = next; 2016 PPP_MP_CB(p)->sequence, seq);
2017 __skb_unlink(p, list);
2018 kfree_skb(p);
2008 continue; 2019 continue;
2009 } 2020 }
2010 if (PPP_MP_CB(p)->sequence != seq) { 2021 if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
2016 lost = 1; 2027 lost = 1;
2017 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? 2028 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
2018 minseq + 1: PPP_MP_CB(p)->sequence; 2029 minseq + 1: PPP_MP_CB(p)->sequence;
2019 next = p; 2030 goto again;
2020 continue;
2021 } 2031 }
2022 2032
2023 /* 2033 /*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
2042 (PPP_MP_CB(head)->BEbits & B)) { 2052 (PPP_MP_CB(head)->BEbits & B)) {
2043 if (len > ppp->mrru + 2) { 2053 if (len > ppp->mrru + 2) {
2044 ++ppp->dev->stats.rx_length_errors; 2054 ++ppp->dev->stats.rx_length_errors;
2045 printk(KERN_DEBUG "PPP: reconstructed packet" 2055 netdev_printk(KERN_DEBUG, ppp->dev,
2046 " is too long (%d)\n", len); 2056 "PPP: reconstructed packet"
2047 } else if (p == head) { 2057 " is too long (%d)\n", len);
2048 /* fragment is complete packet - reuse skb */
2049 tail = p;
2050 skb = skb_get(p);
2051 break;
2052 } else if ((skb = dev_alloc_skb(len)) == NULL) {
2053 ++ppp->dev->stats.rx_missed_errors;
2054 printk(KERN_DEBUG "PPP: no memory for "
2055 "reconstructed packet");
2056 } else { 2058 } else {
2057 tail = p; 2059 tail = p;
2058 break; 2060 break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
2065 * and we haven't found a complete valid packet yet, 2067 * and we haven't found a complete valid packet yet,
2066 * we can discard up to and including this fragment. 2068 * we can discard up to and including this fragment.
2067 */ 2069 */
2068 if (PPP_MP_CB(p)->BEbits & E) 2070 if (PPP_MP_CB(p)->BEbits & E) {
2069 head = next; 2071 struct sk_buff *tmp2;
2070 2072
2073 skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2074 __skb_unlink(p, list);
2075 kfree_skb(p);
2076 }
2077 head = skb_peek(list);
2078 if (!head)
2079 break;
2080 }
2071 ++seq; 2081 ++seq;
2072 } 2082 }
2073 2083
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
2077 signal a receive error. */ 2087 signal a receive error. */
2078 if (PPP_MP_CB(head)->sequence != ppp->nextseq) { 2088 if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2079 if (ppp->debug & 1) 2089 if (ppp->debug & 1)
2080 printk(KERN_DEBUG " missed pkts %u..%u\n", 2090 netdev_printk(KERN_DEBUG, ppp->dev,
2081 ppp->nextseq, 2091 " missed pkts %u..%u\n",
2082 PPP_MP_CB(head)->sequence-1); 2092 ppp->nextseq,
2093 PPP_MP_CB(head)->sequence-1);
2083 ++ppp->dev->stats.rx_dropped; 2094 ++ppp->dev->stats.rx_dropped;
2084 ppp_receive_error(ppp); 2095 ppp_receive_error(ppp);
2085 } 2096 }
2086 2097
2087 if (head != tail) 2098 skb = head;
2088 /* copy to a single skb */ 2099 if (head != tail) {
2089 for (p = head; p != tail->next; p = p->next) 2100 struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2090 skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); 2101 p = skb_queue_next(list, head);
2091 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; 2102 __skb_unlink(skb, list);
2092 head = tail->next; 2103 skb_queue_walk_from_safe(list, p, tmp) {
2093 } 2104 __skb_unlink(p, list);
2105 *fragpp = p;
2106 p->next = NULL;
2107 fragpp = &p->next;
2108
2109 skb->len += p->len;
2110 skb->data_len += p->len;
2111 skb->truesize += p->len;
2112
2113 if (p == tail)
2114 break;
2115 }
2116 } else {
2117 __skb_unlink(skb, list);
2118 }
2094 2119
2095 /* Discard all the skbuffs that we have copied the data out of 2120 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2096 or that we can't use. */
2097 while ((p = list->next) != head) {
2098 __skb_unlink(p, list);
2099 kfree_skb(p);
2100 } 2121 }
2101 2122
2102 return skb; 2123 return skb;
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2617 ret = register_netdev(dev); 2638 ret = register_netdev(dev);
2618 if (ret != 0) { 2639 if (ret != 0) {
2619 unit_put(&pn->units_idr, unit); 2640 unit_put(&pn->units_idr, unit);
2620 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2641 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
2621 dev->name, ret); 2642 dev->name, ret);
2622 goto out2; 2643 goto out2;
2623 } 2644 }
2624 2645
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
2690 2711
2691 if (!ppp->file.dead || ppp->n_channels) { 2712 if (!ppp->file.dead || ppp->n_channels) {
2692 /* "can't happen" */ 2713 /* "can't happen" */
2693 printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " 2714 netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
2694 "n_channels=%d !\n", ppp, ppp->file.dead, 2715 "but dead=%d n_channels=%d !\n",
2695 ppp->n_channels); 2716 ppp, ppp->file.dead, ppp->n_channels);
2696 return; 2717 return;
2697 } 2718 }
2698 2719
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
2834 2855
2835 if (!pch->file.dead) { 2856 if (!pch->file.dead) {
2836 /* "can't happen" */ 2857 /* "can't happen" */
2837 printk(KERN_ERR "ppp: destroying undead channel %p !\n", 2858 pr_err("ppp: destroying undead channel %p !\n", pch);
2838 pch);
2839 return; 2859 return;
2840 } 2860 }
2841 skb_queue_purge(&pch->file.xq); 2861 skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
2847{ 2867{
2848 /* should never happen */ 2868 /* should never happen */
2849 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 2869 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2850 printk(KERN_ERR "PPP: removing module but units remain!\n"); 2870 pr_err("PPP: removing module but units remain!\n");
2851 unregister_chrdev(PPP_MAJOR, "ppp"); 2871 unregister_chrdev(PPP_MAJOR, "ppp");
2852 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2872 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2853 class_destroy(ppp_class); 2873 class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
2865 2885
2866again: 2886again:
2867 if (!idr_pre_get(p, GFP_KERNEL)) { 2887 if (!idr_pre_get(p, GFP_KERNEL)) {
2868 printk(KERN_ERR "PPP: No free memory for idr\n"); 2888 pr_err("PPP: No free memory for idr\n");
2869 return -ENOMEM; 2889 return -ENOMEM;
2870 } 2890 }
2871 2891
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index 164cfad6ce79..51dfcf8023c7 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -175,7 +175,6 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
175 struct pptp_opt *opt = &po->proto.pptp; 175 struct pptp_opt *opt = &po->proto.pptp;
176 struct pptp_gre_header *hdr; 176 struct pptp_gre_header *hdr;
177 unsigned int header_len = sizeof(*hdr); 177 unsigned int header_len = sizeof(*hdr);
178 int err = 0;
179 int islcp; 178 int islcp;
180 int len; 179 int len;
181 unsigned char *data; 180 unsigned char *data;
@@ -190,18 +189,14 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
190 if (sk_pppox(po)->sk_state & PPPOX_DEAD) 189 if (sk_pppox(po)->sk_state & PPPOX_DEAD)
191 goto tx_error; 190 goto tx_error;
192 191
193 { 192 rt = ip_route_output_ports(&init_net, NULL,
194 struct flowi fl = { .oif = 0, 193 opt->dst_addr.sin_addr.s_addr,
195 .nl_u = { 194 opt->src_addr.sin_addr.s_addr,
196 .ip4_u = { 195 0, 0, IPPROTO_GRE,
197 .daddr = opt->dst_addr.sin_addr.s_addr, 196 RT_TOS(0), 0);
198 .saddr = opt->src_addr.sin_addr.s_addr, 197 if (IS_ERR(rt))
199 .tos = RT_TOS(0) } }, 198 goto tx_error;
200 .proto = IPPROTO_GRE }; 199
201 err = ip_route_output_key(&init_net, &rt, &fl);
202 if (err)
203 goto tx_error;
204 }
205 tdev = rt->dst.dev; 200 tdev = rt->dst.dev;
206 201
207 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2; 202 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
@@ -468,21 +463,17 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
468 po->chan.private = sk; 463 po->chan.private = sk;
469 po->chan.ops = &pptp_chan_ops; 464 po->chan.ops = &pptp_chan_ops;
470 465
471 { 466 rt = ip_route_output_ports(&init_net, sk,
472 struct flowi fl = { 467 opt->dst_addr.sin_addr.s_addr,
473 .nl_u = { 468 opt->src_addr.sin_addr.s_addr,
474 .ip4_u = { 469 0, 0,
475 .daddr = opt->dst_addr.sin_addr.s_addr, 470 IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
476 .saddr = opt->src_addr.sin_addr.s_addr, 471 if (IS_ERR(rt)) {
477 .tos = RT_CONN_FLAGS(sk) } }, 472 error = -EHOSTUNREACH;
478 .proto = IPPROTO_GRE }; 473 goto end;
479 security_sk_classify_flow(sk, &fl);
480 if (ip_route_output_key(&init_net, &rt, &fl)) {
481 error = -EHOSTUNREACH;
482 goto end;
483 }
484 sk_setup_caps(sk, &rt->dst);
485 } 474 }
475 sk_setup_caps(sk, &rt->dst);
476
486 po->chan.mtu = dst_mtu(&rt->dst); 477 po->chan.mtu = dst_mtu(&rt->dst);
487 if (!po->chan.mtu) 478 if (!po->chan.mtu)
488 po->chan.mtu = PPP_MTU; 479 po->chan.mtu = PPP_MTU;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 1a3584edd79c..2d21c60085bc 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
379{ 379{
380 struct ql3xxx_port_registers __iomem *port_regs = 380 struct ql3xxx_port_registers __iomem *port_regs =
381 qdev->mem_map_registers; 381 qdev->mem_map_registers;
382 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 382 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
383 383
384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
398 u32 previousBit; 398 u32 previousBit;
399 struct ql3xxx_port_registers __iomem *port_regs = 399 struct ql3xxx_port_registers __iomem *port_regs =
400 qdev->mem_map_registers; 400 qdev->mem_map_registers;
401 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 401 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
402 402
403 /* Clock in a zero, then do the start bit */ 403 /* Clock in a zero, then do the start bit */
404 ql_write_nvram_reg(qdev, spir, 404 ql_write_nvram_reg(qdev, spir,
@@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
467{ 467{
468 struct ql3xxx_port_registers __iomem *port_regs = 468 struct ql3xxx_port_registers __iomem *port_regs =
469 qdev->mem_map_registers; 469 qdev->mem_map_registers;
470 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 470 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
471 471
472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
483 u32 dataBit; 483 u32 dataBit;
484 struct ql3xxx_port_registers __iomem *port_regs = 484 struct ql3xxx_port_registers __iomem *port_regs =
485 qdev->mem_map_registers; 485 qdev->mem_map_registers;
486 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 486 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
487 487
488 /* Read the data bits */ 488 /* Read the data bits */
489 /* The first bit is a dummy. Clock right over it. */ 489 /* The first bit is a dummy. Clock right over it. */
@@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3011 u32 value; 3011 u32 value;
3012 struct ql3xxx_port_registers __iomem *port_regs = 3012 struct ql3xxx_port_registers __iomem *port_regs =
3013 qdev->mem_map_registers; 3013 qdev->mem_map_registers;
3014 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3014 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3016 (void __iomem *)port_regs; 3016 (void __iomem *)port_regs;
3017 u32 delay = 10; 3017 u32 delay = 10;
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 44e316fd67b8..dc44564ef6f9 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -867,7 +867,6 @@ struct qlcnic_nic_intr_coalesce {
867#define LINKEVENT_LINKSPEED_MBPS 0 867#define LINKEVENT_LINKSPEED_MBPS 0
868#define LINKEVENT_LINKSPEED_ENCODED 1 868#define LINKEVENT_LINKSPEED_ENCODED 1
869 869
870#define AUTO_FW_RESET_ENABLED 0x01
871/* firmware response header: 870/* firmware response header:
872 * 63:58 - message type 871 * 63:58 - message type
873 * 57:56 - owner 872 * 57:56 - owner
@@ -1133,14 +1132,10 @@ struct qlcnic_eswitch {
1133#define MAX_BW 100 /* % of link speed */ 1132#define MAX_BW 100 /* % of link speed */
1134#define MAX_VLAN_ID 4095 1133#define MAX_VLAN_ID 4095
1135#define MIN_VLAN_ID 2 1134#define MIN_VLAN_ID 2
1136#define MAX_TX_QUEUES 1
1137#define MAX_RX_QUEUES 4
1138#define DEFAULT_MAC_LEARN 1 1135#define DEFAULT_MAC_LEARN 1
1139 1136
1140#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) 1137#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
1141#define IS_VALID_BW(bw) (bw <= MAX_BW) 1138#define IS_VALID_BW(bw) (bw <= MAX_BW)
1142#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1143#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1144 1139
1145struct qlcnic_pci_func_cfg { 1140struct qlcnic_pci_func_cfg {
1146 u16 func_type; 1141 u16 func_type;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 37c04b4fade3..cd88c7e1bfa9 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -42,7 +42,7 @@ static int use_msi_x = 1;
42module_param(use_msi_x, int, 0444); 42module_param(use_msi_x, int, 0444);
43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
44 44
45static int auto_fw_reset = AUTO_FW_RESET_ENABLED; 45static int auto_fw_reset = 1;
46module_param(auto_fw_reset, int, 0644); 46module_param(auto_fw_reset, int, 0644);
47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
48 48
@@ -2959,8 +2959,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2959 if (adapter->need_fw_reset) 2959 if (adapter->need_fw_reset)
2960 goto detach; 2960 goto detach;
2961 2961
2962 if (adapter->reset_context && 2962 if (adapter->reset_context && auto_fw_reset) {
2963 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
2964 qlcnic_reset_hw_context(adapter); 2963 qlcnic_reset_hw_context(adapter);
2965 adapter->netdev->trans_start = jiffies; 2964 adapter->netdev->trans_start = jiffies;
2966 } 2965 }
@@ -2973,7 +2972,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2973 2972
2974 qlcnic_dev_request_reset(adapter); 2973 qlcnic_dev_request_reset(adapter);
2975 2974
2976 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED)) 2975 if (auto_fw_reset)
2977 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); 2976 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2978 2977
2979 dev_info(&netdev->dev, "firmware hang detected\n"); 2978 dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2982,7 +2981,7 @@ detach:
2982 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : 2981 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2983 QLCNIC_DEV_NEED_RESET; 2982 QLCNIC_DEV_NEED_RESET;
2984 2983
2985 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && 2984 if (auto_fw_reset &&
2986 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { 2985 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2987 2986
2988 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); 2987 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
@@ -3654,10 +3653,8 @@ validate_npar_config(struct qlcnic_adapter *adapter,
3654 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) 3653 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3655 return QL_STATUS_INVALID_PARAM; 3654 return QL_STATUS_INVALID_PARAM;
3656 3655
3657 if (!IS_VALID_BW(np_cfg[i].min_bw) 3656 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3658 || !IS_VALID_BW(np_cfg[i].max_bw) 3657 !IS_VALID_BW(np_cfg[i].max_bw))
3659 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3660 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3661 return QL_STATUS_INVALID_PARAM; 3658 return QL_STATUS_INVALID_PARAM;
3662 } 3659 }
3663 return 0; 3660 return 0;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7ffdb80adf40..5e403511289d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -37,6 +37,7 @@
37 37
38#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" 38#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
39#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" 39#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
40#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
40 41
41#ifdef RTL8169_DEBUG 42#ifdef RTL8169_DEBUG
42#define assert(expr) \ 43#define assert(expr) \
@@ -124,6 +125,8 @@ enum mac_version {
124 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D 125 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
125 RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP 126 RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP
126 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP 127 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
128 RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E
129 RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E
127}; 130};
128 131
129#define _R(NAME,MAC,MASK) \ 132#define _R(NAME,MAC,MASK) \
@@ -161,7 +164,9 @@ static const struct {
161 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E 164 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
162 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E 165 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
163 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E 166 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
164 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880) // PCI-E 167 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
168 _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
169 _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880) // PCI-E
165}; 170};
166#undef _R 171#undef _R
167 172
@@ -268,9 +273,15 @@ enum rtl8168_8101_registers {
268#define EPHYAR_REG_MASK 0x1f 273#define EPHYAR_REG_MASK 0x1f
269#define EPHYAR_REG_SHIFT 16 274#define EPHYAR_REG_SHIFT 16
270#define EPHYAR_DATA_MASK 0xffff 275#define EPHYAR_DATA_MASK 0xffff
276 DLLPR = 0xd0,
277#define PM_SWITCH (1 << 6)
271 DBG_REG = 0xd1, 278 DBG_REG = 0xd1,
272#define FIX_NAK_1 (1 << 4) 279#define FIX_NAK_1 (1 << 4)
273#define FIX_NAK_2 (1 << 3) 280#define FIX_NAK_2 (1 << 3)
281 TWSI = 0xd2,
282 MCU = 0xd3,
283#define EN_NDP (1 << 3)
284#define EN_OOB_RESET (1 << 2)
274 EFUSEAR = 0xdc, 285 EFUSEAR = 0xdc,
275#define EFUSEAR_FLAG 0x80000000 286#define EFUSEAR_FLAG 0x80000000
276#define EFUSEAR_WRITE_CMD 0x80000000 287#define EFUSEAR_WRITE_CMD 0x80000000
@@ -527,9 +538,6 @@ struct rtl8169_private {
527 u16 napi_event; 538 u16 napi_event;
528 u16 intr_mask; 539 u16 intr_mask;
529 int phy_1000_ctrl_reg; 540 int phy_1000_ctrl_reg;
530#ifdef CONFIG_R8169_VLAN
531 struct vlan_group *vlgrp;
532#endif
533 541
534 struct mdio_ops { 542 struct mdio_ops {
535 void (*write)(void __iomem *, int, int); 543 void (*write)(void __iomem *, int, int);
@@ -541,7 +549,7 @@ struct rtl8169_private {
541 void (*up)(struct rtl8169_private *); 549 void (*up)(struct rtl8169_private *);
542 } pll_power_ops; 550 } pll_power_ops;
543 551
544 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); 552 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
545 int (*get_settings)(struct net_device *, struct ethtool_cmd *); 553 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
546 void (*phy_reset_enable)(struct rtl8169_private *tp); 554 void (*phy_reset_enable)(struct rtl8169_private *tp);
547 void (*hw_start)(struct net_device *); 555 void (*hw_start)(struct net_device *);
@@ -569,6 +577,7 @@ MODULE_LICENSE("GPL");
569MODULE_VERSION(RTL8169_VERSION); 577MODULE_VERSION(RTL8169_VERSION);
570MODULE_FIRMWARE(FIRMWARE_8168D_1); 578MODULE_FIRMWARE(FIRMWARE_8168D_1);
571MODULE_FIRMWARE(FIRMWARE_8168D_2); 579MODULE_FIRMWARE(FIRMWARE_8168D_2);
580MODULE_FIRMWARE(FIRMWARE_8105E_1);
572 581
573static int rtl8169_open(struct net_device *dev); 582static int rtl8169_open(struct net_device *dev);
574static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, 583static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -1098,7 +1107,7 @@ static int rtl8169_get_regs_len(struct net_device *dev)
1098} 1107}
1099 1108
1100static int rtl8169_set_speed_tbi(struct net_device *dev, 1109static int rtl8169_set_speed_tbi(struct net_device *dev,
1101 u8 autoneg, u16 speed, u8 duplex) 1110 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1102{ 1111{
1103 struct rtl8169_private *tp = netdev_priv(dev); 1112 struct rtl8169_private *tp = netdev_priv(dev);
1104 void __iomem *ioaddr = tp->mmio_addr; 1113 void __iomem *ioaddr = tp->mmio_addr;
@@ -1121,17 +1130,30 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
1121} 1130}
1122 1131
1123static int rtl8169_set_speed_xmii(struct net_device *dev, 1132static int rtl8169_set_speed_xmii(struct net_device *dev,
1124 u8 autoneg, u16 speed, u8 duplex) 1133 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1125{ 1134{
1126 struct rtl8169_private *tp = netdev_priv(dev); 1135 struct rtl8169_private *tp = netdev_priv(dev);
1127 int giga_ctrl, bmcr; 1136 int giga_ctrl, bmcr;
1137 int rc = -EINVAL;
1138
1139 rtl_writephy(tp, 0x1f, 0x0000);
1128 1140
1129 if (autoneg == AUTONEG_ENABLE) { 1141 if (autoneg == AUTONEG_ENABLE) {
1130 int auto_nego; 1142 int auto_nego;
1131 1143
1132 auto_nego = rtl_readphy(tp, MII_ADVERTISE); 1144 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1133 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL | 1145 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1134 ADVERTISE_100HALF | ADVERTISE_100FULL); 1146 ADVERTISE_100HALF | ADVERTISE_100FULL);
1147
1148 if (adv & ADVERTISED_10baseT_Half)
1149 auto_nego |= ADVERTISE_10HALF;
1150 if (adv & ADVERTISED_10baseT_Full)
1151 auto_nego |= ADVERTISE_10FULL;
1152 if (adv & ADVERTISED_100baseT_Half)
1153 auto_nego |= ADVERTISE_100HALF;
1154 if (adv & ADVERTISED_100baseT_Full)
1155 auto_nego |= ADVERTISE_100FULL;
1156
1135 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1157 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1136 1158
1137 giga_ctrl = rtl_readphy(tp, MII_CTRL1000); 1159 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
@@ -1145,27 +1167,22 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1145 (tp->mac_version != RTL_GIGA_MAC_VER_13) && 1167 (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
1146 (tp->mac_version != RTL_GIGA_MAC_VER_14) && 1168 (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
1147 (tp->mac_version != RTL_GIGA_MAC_VER_15) && 1169 (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
1148 (tp->mac_version != RTL_GIGA_MAC_VER_16)) { 1170 (tp->mac_version != RTL_GIGA_MAC_VER_16) &&
1149 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; 1171 (tp->mac_version != RTL_GIGA_MAC_VER_29) &&
1150 } else { 1172 (tp->mac_version != RTL_GIGA_MAC_VER_30)) {
1173 if (adv & ADVERTISED_1000baseT_Half)
1174 giga_ctrl |= ADVERTISE_1000HALF;
1175 if (adv & ADVERTISED_1000baseT_Full)
1176 giga_ctrl |= ADVERTISE_1000FULL;
1177 } else if (adv & (ADVERTISED_1000baseT_Half |
1178 ADVERTISED_1000baseT_Full)) {
1151 netif_info(tp, link, dev, 1179 netif_info(tp, link, dev,
1152 "PHY does not support 1000Mbps\n"); 1180 "PHY does not support 1000Mbps\n");
1181 goto out;
1153 } 1182 }
1154 1183
1155 bmcr = BMCR_ANENABLE | BMCR_ANRESTART; 1184 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1156 1185
1157 if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
1158 (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
1159 (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
1160 /*
1161 * Wake up the PHY.
1162 * Vendor specific (0x1f) and reserved (0x0e) MII
1163 * registers.
1164 */
1165 rtl_writephy(tp, 0x1f, 0x0000);
1166 rtl_writephy(tp, 0x0e, 0x0000);
1167 }
1168
1169 rtl_writephy(tp, MII_ADVERTISE, auto_nego); 1186 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1170 rtl_writephy(tp, MII_CTRL1000, giga_ctrl); 1187 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1171 } else { 1188 } else {
@@ -1176,12 +1193,10 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1176 else if (speed == SPEED_100) 1193 else if (speed == SPEED_100)
1177 bmcr = BMCR_SPEED100; 1194 bmcr = BMCR_SPEED100;
1178 else 1195 else
1179 return -EINVAL; 1196 goto out;
1180 1197
1181 if (duplex == DUPLEX_FULL) 1198 if (duplex == DUPLEX_FULL)
1182 bmcr |= BMCR_FULLDPLX; 1199 bmcr |= BMCR_FULLDPLX;
1183
1184 rtl_writephy(tp, 0x1f, 0x0000);
1185 } 1200 }
1186 1201
1187 tp->phy_1000_ctrl_reg = giga_ctrl; 1202 tp->phy_1000_ctrl_reg = giga_ctrl;
@@ -1199,16 +1214,18 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1199 } 1214 }
1200 } 1215 }
1201 1216
1202 return 0; 1217 rc = 0;
1218out:
1219 return rc;
1203} 1220}
1204 1221
1205static int rtl8169_set_speed(struct net_device *dev, 1222static int rtl8169_set_speed(struct net_device *dev,
1206 u8 autoneg, u16 speed, u8 duplex) 1223 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1207{ 1224{
1208 struct rtl8169_private *tp = netdev_priv(dev); 1225 struct rtl8169_private *tp = netdev_priv(dev);
1209 int ret; 1226 int ret;
1210 1227
1211 ret = tp->set_speed(dev, autoneg, speed, duplex); 1228 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1212 1229
1213 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) 1230 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1214 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); 1231 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
@@ -1223,7 +1240,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1223 int ret; 1240 int ret;
1224 1241
1225 spin_lock_irqsave(&tp->lock, flags); 1242 spin_lock_irqsave(&tp->lock, flags);
1226 ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex); 1243 ret = rtl8169_set_speed(dev,
1244 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1227 spin_unlock_irqrestore(&tp->lock, flags); 1245 spin_unlock_irqrestore(&tp->lock, flags);
1228 1246
1229 return ret; 1247 return ret;
@@ -1257,8 +1275,6 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
1257 return 0; 1275 return 0;
1258} 1276}
1259 1277
1260#ifdef CONFIG_R8169_VLAN
1261
1262static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, 1278static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1263 struct sk_buff *skb) 1279 struct sk_buff *skb)
1264{ 1280{
@@ -1266,64 +1282,37 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1266 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 1282 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1267} 1283}
1268 1284
1269static void rtl8169_vlan_rx_register(struct net_device *dev, 1285#define NETIF_F_HW_VLAN_TX_RX (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
1270 struct vlan_group *grp) 1286
1287static void rtl8169_vlan_mode(struct net_device *dev)
1271{ 1288{
1272 struct rtl8169_private *tp = netdev_priv(dev); 1289 struct rtl8169_private *tp = netdev_priv(dev);
1273 void __iomem *ioaddr = tp->mmio_addr; 1290 void __iomem *ioaddr = tp->mmio_addr;
1274 unsigned long flags; 1291 unsigned long flags;
1275 1292
1276 spin_lock_irqsave(&tp->lock, flags); 1293 spin_lock_irqsave(&tp->lock, flags);
1277 tp->vlgrp = grp; 1294 if (dev->features & NETIF_F_HW_VLAN_RX)
1278 /*
1279 * Do not disable RxVlan on 8110SCd.
1280 */
1281 if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
1282 tp->cp_cmd |= RxVlan; 1295 tp->cp_cmd |= RxVlan;
1283 else 1296 else
1284 tp->cp_cmd &= ~RxVlan; 1297 tp->cp_cmd &= ~RxVlan;
1285 RTL_W16(CPlusCmd, tp->cp_cmd); 1298 RTL_W16(CPlusCmd, tp->cp_cmd);
1299 /* PCI commit */
1286 RTL_R16(CPlusCmd); 1300 RTL_R16(CPlusCmd);
1287 spin_unlock_irqrestore(&tp->lock, flags); 1301 spin_unlock_irqrestore(&tp->lock, flags);
1302
1303 dev->vlan_features = dev->features &~ NETIF_F_HW_VLAN_TX_RX;
1288} 1304}
1289 1305
1290static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1306static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1291 struct sk_buff *skb, int polling)
1292{ 1307{
1293 u32 opts2 = le32_to_cpu(desc->opts2); 1308 u32 opts2 = le32_to_cpu(desc->opts2);
1294 struct vlan_group *vlgrp = tp->vlgrp;
1295 int ret;
1296 1309
1297 if (vlgrp && (opts2 & RxVlanTag)) { 1310 if (opts2 & RxVlanTag)
1298 u16 vtag = swab16(opts2 & 0xffff); 1311 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1299 1312
1300 if (likely(polling))
1301 vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
1302 else
1303 __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
1304 ret = 0;
1305 } else
1306 ret = -1;
1307 desc->opts2 = 0; 1313 desc->opts2 = 0;
1308 return ret;
1309} 1314}
1310 1315
1311#else /* !CONFIG_R8169_VLAN */
1312
1313static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1314 struct sk_buff *skb)
1315{
1316 return 0;
1317}
1318
1319static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1320 struct sk_buff *skb, int polling)
1321{
1322 return -1;
1323}
1324
1325#endif
1326
1327static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) 1316static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1328{ 1317{
1329 struct rtl8169_private *tp = netdev_priv(dev); 1318 struct rtl8169_private *tp = netdev_priv(dev);
@@ -1494,6 +1483,28 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1494 } 1483 }
1495} 1484}
1496 1485
1486static int rtl8169_set_flags(struct net_device *dev, u32 data)
1487{
1488 struct rtl8169_private *tp = netdev_priv(dev);
1489 unsigned long old_feat = dev->features;
1490 int rc;
1491
1492 if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
1493 !(data & ETH_FLAG_RXVLAN)) {
1494 netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
1495 return -EINVAL;
1496 }
1497
1498 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
1499 if (rc)
1500 return rc;
1501
1502 if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
1503 rtl8169_vlan_mode(dev);
1504
1505 return 0;
1506}
1507
1497static const struct ethtool_ops rtl8169_ethtool_ops = { 1508static const struct ethtool_ops rtl8169_ethtool_ops = {
1498 .get_drvinfo = rtl8169_get_drvinfo, 1509 .get_drvinfo = rtl8169_get_drvinfo,
1499 .get_regs_len = rtl8169_get_regs_len, 1510 .get_regs_len = rtl8169_get_regs_len,
@@ -1513,6 +1524,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1513 .get_strings = rtl8169_get_strings, 1524 .get_strings = rtl8169_get_strings,
1514 .get_sset_count = rtl8169_get_sset_count, 1525 .get_sset_count = rtl8169_get_sset_count,
1515 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1526 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1527 .set_flags = rtl8169_set_flags,
1528 .get_flags = ethtool_op_get_flags,
1516}; 1529};
1517 1530
1518static void rtl8169_get_mac_version(struct rtl8169_private *tp, 1531static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1561,6 +1574,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1561 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1574 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1562 1575
1563 /* 8101 family. */ 1576 /* 8101 family. */
1577 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1578 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
1579 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
1564 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, 1580 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
1565 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, 1581 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
1566 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, 1582 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
@@ -2437,6 +2453,33 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2437 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2453 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2438} 2454}
2439 2455
2456static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
2457{
2458 static const struct phy_reg phy_reg_init[] = {
2459 { 0x1f, 0x0005 },
2460 { 0x1a, 0x0000 },
2461 { 0x1f, 0x0000 },
2462
2463 { 0x1f, 0x0004 },
2464 { 0x1c, 0x0000 },
2465 { 0x1f, 0x0000 },
2466
2467 { 0x1f, 0x0001 },
2468 { 0x15, 0x7701 },
2469 { 0x1f, 0x0000 }
2470 };
2471
2472 /* Disable ALDPS before ram code */
2473 rtl_writephy(tp, 0x1f, 0x0000);
2474 rtl_writephy(tp, 0x18, 0x0310);
2475 msleep(100);
2476
2477 if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
2478 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2479
2480 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2481}
2482
2440static void rtl_hw_phy_config(struct net_device *dev) 2483static void rtl_hw_phy_config(struct net_device *dev)
2441{ 2484{
2442 struct rtl8169_private *tp = netdev_priv(dev); 2485 struct rtl8169_private *tp = netdev_priv(dev);
@@ -2504,6 +2547,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
2504 case RTL_GIGA_MAC_VER_28: 2547 case RTL_GIGA_MAC_VER_28:
2505 rtl8168d_4_hw_phy_config(tp); 2548 rtl8168d_4_hw_phy_config(tp);
2506 break; 2549 break;
2550 case RTL_GIGA_MAC_VER_29:
2551 case RTL_GIGA_MAC_VER_30:
2552 rtl8105e_hw_phy_config(tp);
2553 break;
2507 2554
2508 default: 2555 default:
2509 break; 2556 break;
@@ -2635,11 +2682,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
2635 2682
2636 rtl8169_phy_reset(dev, tp); 2683 rtl8169_phy_reset(dev, tp);
2637 2684
2638 /* 2685 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
2639 * rtl8169_set_speed_xmii takes good care of the Fast Ethernet 2686 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
2640 * only 8101. Don't panic. 2687 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
2641 */ 2688 tp->mii.supports_gmii ?
2642 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL); 2689 ADVERTISED_1000baseT_Half |
2690 ADVERTISED_1000baseT_Full : 0);
2643 2691
2644 if (RTL_R8(PHYstatus) & TBI_Enable) 2692 if (RTL_R8(PHYstatus) & TBI_Enable)
2645 netif_info(tp, link, dev, "TBI auto-negotiating\n"); 2693 netif_info(tp, link, dev, "TBI auto-negotiating\n");
@@ -2795,9 +2843,6 @@ static const struct net_device_ops rtl8169_netdev_ops = {
2795 .ndo_set_mac_address = rtl_set_mac_address, 2843 .ndo_set_mac_address = rtl_set_mac_address,
2796 .ndo_do_ioctl = rtl8169_ioctl, 2844 .ndo_do_ioctl = rtl8169_ioctl,
2797 .ndo_set_multicast_list = rtl_set_rx_mode, 2845 .ndo_set_multicast_list = rtl_set_rx_mode,
2798#ifdef CONFIG_R8169_VLAN
2799 .ndo_vlan_rx_register = rtl8169_vlan_rx_register,
2800#endif
2801#ifdef CONFIG_NET_POLL_CONTROLLER 2846#ifdef CONFIG_NET_POLL_CONTROLLER
2802 .ndo_poll_controller = rtl8169_netpoll, 2847 .ndo_poll_controller = rtl8169_netpoll,
2803#endif 2848#endif
@@ -2952,6 +2997,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
2952 case RTL_GIGA_MAC_VER_09: 2997 case RTL_GIGA_MAC_VER_09:
2953 case RTL_GIGA_MAC_VER_10: 2998 case RTL_GIGA_MAC_VER_10:
2954 case RTL_GIGA_MAC_VER_16: 2999 case RTL_GIGA_MAC_VER_16:
3000 case RTL_GIGA_MAC_VER_29:
3001 case RTL_GIGA_MAC_VER_30:
2955 ops->down = r810x_pll_power_down; 3002 ops->down = r810x_pll_power_down;
2956 ops->up = r810x_pll_power_up; 3003 ops->up = r810x_pll_power_up;
2957 break; 3004 break;
@@ -3104,6 +3151,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3104 /* Identify chip attached to board */ 3151 /* Identify chip attached to board */
3105 rtl8169_get_mac_version(tp, ioaddr); 3152 rtl8169_get_mac_version(tp, ioaddr);
3106 3153
3154 /*
3155 * Pretend we are using VLANs; This bypasses a nasty bug where
3156 * Interrupts stop flowing on high load on 8110SCd controllers.
3157 */
3158 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3159 tp->cp_cmd |= RxVlan;
3160
3107 rtl_init_mdio_ops(tp); 3161 rtl_init_mdio_ops(tp);
3108 rtl_init_pll_power_ops(tp); 3162 rtl_init_pll_power_ops(tp);
3109 3163
@@ -3172,10 +3226,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3172 3226
3173 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 3227 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
3174 3228
3175#ifdef CONFIG_R8169_VLAN 3229 dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO;
3176 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3177#endif
3178 dev->features |= NETIF_F_GRO;
3179 3230
3180 tp->intr_mask = 0xffff; 3231 tp->intr_mask = 0xffff;
3181 tp->hw_start = cfg->hw_start; 3232 tp->hw_start = cfg->hw_start;
@@ -3293,12 +3344,7 @@ static int rtl8169_open(struct net_device *dev)
3293 3344
3294 rtl8169_init_phy(dev, tp); 3345 rtl8169_init_phy(dev, tp);
3295 3346
3296 /* 3347 rtl8169_vlan_mode(dev);
3297 * Pretend we are using VLANs; This bypasses a nasty bug where
3298 * Interrupts stop flowing on high load on 8110SCd controllers.
3299 */
3300 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3301 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3302 3348
3303 rtl_pll_power_up(tp); 3349 rtl_pll_power_up(tp);
3304 3350
@@ -3915,6 +3961,37 @@ static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
3915 rtl_ephy_write(ioaddr, 0x03, 0xc2f9); 3961 rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
3916} 3962}
3917 3963
3964static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3965{
3966 static const struct ephy_info e_info_8105e_1[] = {
3967 { 0x07, 0, 0x4000 },
3968 { 0x19, 0, 0x0200 },
3969 { 0x19, 0, 0x0020 },
3970 { 0x1e, 0, 0x2000 },
3971 { 0x03, 0, 0x0001 },
3972 { 0x19, 0, 0x0100 },
3973 { 0x19, 0, 0x0004 },
3974 { 0x0a, 0, 0x0020 }
3975 };
3976
3977 /* Force LAN exit from ASPM if Rx/Tx are not idel */
3978 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
3979
3980 /* disable Early Tally Counter */
3981 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
3982
3983 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
3984 RTL_W8(DLLPR, RTL_R8(DLLPR) | PM_SWITCH);
3985
3986 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
3987}
3988
3989static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
3990{
3991 rtl_hw_start_8105e_1(ioaddr, pdev);
3992 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
3993}
3994
3918static void rtl_hw_start_8101(struct net_device *dev) 3995static void rtl_hw_start_8101(struct net_device *dev)
3919{ 3996{
3920 struct rtl8169_private *tp = netdev_priv(dev); 3997 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3945,6 +4022,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
3945 case RTL_GIGA_MAC_VER_09: 4022 case RTL_GIGA_MAC_VER_09:
3946 rtl_hw_start_8102e_2(ioaddr, pdev); 4023 rtl_hw_start_8102e_2(ioaddr, pdev);
3947 break; 4024 break;
4025
4026 case RTL_GIGA_MAC_VER_29:
4027 rtl_hw_start_8105e_1(ioaddr, pdev);
4028 break;
4029 case RTL_GIGA_MAC_VER_30:
4030 rtl_hw_start_8105e_2(ioaddr, pdev);
4031 break;
3948 } 4032 }
3949 4033
3950 RTL_W8(Cfg9346, Cfg9346_Lock); 4034 RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -4603,12 +4687,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4603 skb_put(skb, pkt_size); 4687 skb_put(skb, pkt_size);
4604 skb->protocol = eth_type_trans(skb, dev); 4688 skb->protocol = eth_type_trans(skb, dev);
4605 4689
4606 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) { 4690 rtl8169_rx_vlan_tag(desc, skb);
4607 if (likely(polling)) 4691
4608 napi_gro_receive(&tp->napi, skb); 4692 if (likely(polling))
4609 else 4693 napi_gro_receive(&tp->napi, skb);
4610 netif_rx(skb); 4694 else
4611 } 4695 netif_rx(skb);
4612 4696
4613 dev->stats.rx_bytes += pkt_size; 4697 dev->stats.rx_bytes += pkt_size;
4614 dev->stats.rx_packets++; 4698 dev->stats.rx_packets++;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 39c17cecb8b9..2ad6364103ea 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -7556,7 +7556,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7556 */ 7556 */
7557 skb->ip_summed = CHECKSUM_UNNECESSARY; 7557 skb->ip_summed = CHECKSUM_UNNECESSARY;
7558 if (ring_data->lro) { 7558 if (ring_data->lro) {
7559 u32 tcp_len; 7559 u32 tcp_len = 0;
7560 u8 *tcp; 7560 u8 *tcp;
7561 int ret = 0; 7561 int ret = 0;
7562 7562
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac743843..b8bd936374f2 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include <linux/cpu_rmap.h>
24#include "net_driver.h" 25#include "net_driver.h"
25#include "efx.h" 26#include "efx.h"
26#include "nic.h" 27#include "nic.h"
@@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
307 channel->irq_mod_score = 0; 308 channel->irq_mod_score = 0;
308 } 309 }
309 310
311 efx_filter_rfs_expire(channel);
312
310 /* There is no race here; although napi_disable() will 313 /* There is no race here; although napi_disable() will
311 * only wait for napi_complete(), this isn't a problem 314 * only wait for napi_complete(), this isn't a problem
312 * since efx_channel_processed() will have no effect if 315 * since efx_channel_processed() will have no effect if
@@ -673,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx)
673 676
674 efx_for_each_channel_rx_queue(rx_queue, channel) 677 efx_for_each_channel_rx_queue(rx_queue, channel)
675 efx_fini_rx_queue(rx_queue); 678 efx_fini_rx_queue(rx_queue);
676 efx_for_each_channel_tx_queue(tx_queue, channel) 679 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
677 efx_fini_tx_queue(tx_queue); 680 efx_fini_tx_queue(tx_queue);
678 efx_fini_eventq(channel); 681 efx_fini_eventq(channel);
679 } 682 }
@@ -689,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel)
689 692
690 efx_for_each_channel_rx_queue(rx_queue, channel) 693 efx_for_each_channel_rx_queue(rx_queue, channel)
691 efx_remove_rx_queue(rx_queue); 694 efx_remove_rx_queue(rx_queue);
692 efx_for_each_channel_tx_queue(tx_queue, channel) 695 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
693 efx_remove_tx_queue(tx_queue); 696 efx_remove_tx_queue(tx_queue);
694 efx_remove_eventq(channel); 697 efx_remove_eventq(channel);
695} 698}
@@ -1101,8 +1104,8 @@ static int efx_init_io(struct efx_nic *efx)
1101 rc = -EIO; 1104 rc = -EIO;
1102 goto fail3; 1105 goto fail3;
1103 } 1106 }
1104 efx->membase = ioremap_nocache(efx->membase_phys, 1107 efx->membase = ioremap_wc(efx->membase_phys,
1105 efx->type->mem_map_size); 1108 efx->type->mem_map_size);
1106 if (!efx->membase) { 1109 if (!efx->membase) {
1107 netif_err(efx, probe, efx->net_dev, 1110 netif_err(efx, probe, efx->net_dev,
1108 "could not map memory BAR at %llx+%x\n", 1111 "could not map memory BAR at %llx+%x\n",
@@ -1175,10 +1178,32 @@ static int efx_wanted_channels(void)
1175 return count; 1178 return count;
1176} 1179}
1177 1180
1181static int
1182efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
1183{
1184#ifdef CONFIG_RFS_ACCEL
1185 int i, rc;
1186
1187 efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
1188 if (!efx->net_dev->rx_cpu_rmap)
1189 return -ENOMEM;
1190 for (i = 0; i < efx->n_rx_channels; i++) {
1191 rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
1192 xentries[i].vector);
1193 if (rc) {
1194 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
1195 efx->net_dev->rx_cpu_rmap = NULL;
1196 return rc;
1197 }
1198 }
1199#endif
1200 return 0;
1201}
1202
1178/* Probe the number and type of interrupts we are able to obtain, and 1203/* Probe the number and type of interrupts we are able to obtain, and
1179 * the resulting numbers of channels and RX queues. 1204 * the resulting numbers of channels and RX queues.
1180 */ 1205 */
1181static void efx_probe_interrupts(struct efx_nic *efx) 1206static int efx_probe_interrupts(struct efx_nic *efx)
1182{ 1207{
1183 int max_channels = 1208 int max_channels =
1184 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); 1209 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1220 efx->n_tx_channels = efx->n_channels; 1245 efx->n_tx_channels = efx->n_channels;
1221 efx->n_rx_channels = efx->n_channels; 1246 efx->n_rx_channels = efx->n_channels;
1222 } 1247 }
1248 rc = efx_init_rx_cpu_rmap(efx, xentries);
1249 if (rc) {
1250 pci_disable_msix(efx->pci_dev);
1251 return rc;
1252 }
1223 for (i = 0; i < n_channels; i++) 1253 for (i = 0; i < n_channels; i++)
1224 efx_get_channel(efx, i)->irq = 1254 efx_get_channel(efx, i)->irq =
1225 xentries[i].vector; 1255 xentries[i].vector;
@@ -1253,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1253 efx->n_tx_channels = 1; 1283 efx->n_tx_channels = 1;
1254 efx->legacy_irq = efx->pci_dev->irq; 1284 efx->legacy_irq = efx->pci_dev->irq;
1255 } 1285 }
1286
1287 return 0;
1256} 1288}
1257 1289
1258static void efx_remove_interrupts(struct efx_nic *efx) 1290static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1271,21 +1303,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1271 1303
1272static void efx_set_channels(struct efx_nic *efx) 1304static void efx_set_channels(struct efx_nic *efx)
1273{ 1305{
1274 struct efx_channel *channel;
1275 struct efx_tx_queue *tx_queue;
1276
1277 efx->tx_channel_offset = 1306 efx->tx_channel_offset =
1278 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1307 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1279
1280 /* Channel pointers were set in efx_init_struct() but we now
1281 * need to clear them for TX queues in any RX-only channels. */
1282 efx_for_each_channel(channel, efx) {
1283 if (channel->channel - efx->tx_channel_offset >=
1284 efx->n_tx_channels) {
1285 efx_for_each_channel_tx_queue(tx_queue, channel)
1286 tx_queue->channel = NULL;
1287 }
1288 }
1289} 1308}
1290 1309
1291static int efx_probe_nic(struct efx_nic *efx) 1310static int efx_probe_nic(struct efx_nic *efx)
@@ -1302,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx)
1302 1321
1303 /* Determine the number of channels and queues by trying to hook 1322 /* Determine the number of channels and queues by trying to hook
1304 * in MSI-X interrupts. */ 1323 * in MSI-X interrupts. */
1305 efx_probe_interrupts(efx); 1324 rc = efx_probe_interrupts(efx);
1325 if (rc)
1326 goto fail;
1306 1327
1307 if (efx->n_channels > 1) 1328 if (efx->n_channels > 1)
1308 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1329 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1317,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx)
1317 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); 1338 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
1318 1339
1319 return 0; 1340 return 0;
1341
1342fail:
1343 efx->type->remove(efx);
1344 return rc;
1320} 1345}
1321 1346
1322static void efx_remove_nic(struct efx_nic *efx) 1347static void efx_remove_nic(struct efx_nic *efx)
@@ -1531,9 +1556,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1531 efx->irq_rx_adaptive = rx_adaptive; 1556 efx->irq_rx_adaptive = rx_adaptive;
1532 efx->irq_rx_moderation = rx_ticks; 1557 efx->irq_rx_moderation = rx_ticks;
1533 efx_for_each_channel(channel, efx) { 1558 efx_for_each_channel(channel, efx) {
1534 if (efx_channel_get_rx_queue(channel)) 1559 if (efx_channel_has_rx_queue(channel))
1535 channel->irq_moderation = rx_ticks; 1560 channel->irq_moderation = rx_ticks;
1536 else if (efx_channel_get_tx_queue(channel, 0)) 1561 else if (efx_channel_has_tx_queues(channel))
1537 channel->irq_moderation = tx_ticks; 1562 channel->irq_moderation = tx_ticks;
1538 } 1563 }
1539} 1564}
@@ -1849,6 +1874,10 @@ static const struct net_device_ops efx_netdev_ops = {
1849#ifdef CONFIG_NET_POLL_CONTROLLER 1874#ifdef CONFIG_NET_POLL_CONTROLLER
1850 .ndo_poll_controller = efx_netpoll, 1875 .ndo_poll_controller = efx_netpoll,
1851#endif 1876#endif
1877 .ndo_setup_tc = efx_setup_tc,
1878#ifdef CONFIG_RFS_ACCEL
1879 .ndo_rx_flow_steer = efx_filter_rfs,
1880#endif
1852}; 1881};
1853 1882
1854static void efx_update_name(struct efx_nic *efx) 1883static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1939,8 @@ static int efx_register_netdev(struct efx_nic *efx)
1910 1939
1911 efx_for_each_channel(channel, efx) { 1940 efx_for_each_channel(channel, efx) {
1912 struct efx_tx_queue *tx_queue; 1941 struct efx_tx_queue *tx_queue;
1913 efx_for_each_channel_tx_queue(tx_queue, channel) { 1942 efx_for_each_channel_tx_queue(tx_queue, channel)
1914 tx_queue->core_txq = netdev_get_tx_queue( 1943 efx_init_tx_queue_core_txq(tx_queue);
1915 efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
1916 }
1917 } 1944 }
1918 1945
1919 /* Always start with carrier off; PHY events will detect the link */ 1946 /* Always start with carrier off; PHY events will detect the link */
@@ -2288,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx)
2288 */ 2315 */
2289static void efx_pci_remove_main(struct efx_nic *efx) 2316static void efx_pci_remove_main(struct efx_nic *efx)
2290{ 2317{
2318#ifdef CONFIG_RFS_ACCEL
2319 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2320 efx->net_dev->rx_cpu_rmap = NULL;
2321#endif
2291 efx_nic_fini_interrupt(efx); 2322 efx_nic_fini_interrupt(efx);
2292 efx_fini_channels(efx); 2323 efx_fini_channels(efx);
2293 efx_fini_port(efx); 2324 efx_fini_port(efx);
@@ -2401,7 +2432,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2401 int i, rc; 2432 int i, rc;
2402 2433
2403 /* Allocate and initialise a struct net_device and struct efx_nic */ 2434 /* Allocate and initialise a struct net_device and struct efx_nic */
2404 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES); 2435 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
2436 EFX_MAX_RX_QUEUES);
2405 if (!net_dev) 2437 if (!net_dev)
2406 return -ENOMEM; 2438 return -ENOMEM;
2407 net_dev->features |= (type->offload_features | NETIF_F_SG | 2439 net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5212b1..3d83a1f74fef 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,7 @@
29extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); 29extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
30extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); 30extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
31extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); 31extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
32extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
32extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 33extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
33extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); 34extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
34extern netdev_tx_t 35extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
36extern netdev_tx_t 37extern netdev_tx_t
37efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 38efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
38extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 39extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
40extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
39 41
40/* RX */ 42/* RX */
41extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 43extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx,
74 struct efx_filter_spec *spec); 76 struct efx_filter_spec *spec);
75extern void efx_filter_clear_rx(struct efx_nic *efx, 77extern void efx_filter_clear_rx(struct efx_nic *efx,
76 enum efx_filter_priority priority); 78 enum efx_filter_priority priority);
79#ifdef CONFIG_RFS_ACCEL
80extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
81 u16 rxq_index, u32 flow_id);
82extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
83static inline void efx_filter_rfs_expire(struct efx_channel *channel)
84{
85 if (channel->rfs_filters_added >= 60 &&
86 __efx_filter_rfs_expire(channel->efx, 100))
87 channel->rfs_filters_added -= 60;
88}
89#define efx_filter_rfs_enabled() 1
90#else
91static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
92#define efx_filter_rfs_enabled() 0
93#endif
77 94
78/* Channels */ 95/* Channels */
79extern void efx_process_channel_now(struct efx_channel *channel); 96extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index ca886d98bdc7..807178ef65ad 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -28,7 +28,8 @@ struct efx_ethtool_stat {
28 enum { 28 enum {
29 EFX_ETHTOOL_STAT_SOURCE_mac_stats, 29 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
30 EFX_ETHTOOL_STAT_SOURCE_nic, 30 EFX_ETHTOOL_STAT_SOURCE_nic,
31 EFX_ETHTOOL_STAT_SOURCE_channel 31 EFX_ETHTOOL_STAT_SOURCE_channel,
32 EFX_ETHTOOL_STAT_SOURCE_tx_queue
32 } source; 33 } source;
33 unsigned offset; 34 unsigned offset;
34 u64(*get_stat) (void *field); /* Reader function */ 35 u64(*get_stat) (void *field); /* Reader function */
@@ -86,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field)
86 EFX_ETHTOOL_STAT(field, channel, n_##field, \ 87 EFX_ETHTOOL_STAT(field, channel, n_##field, \
87 unsigned int, efx_get_uint_stat) 88 unsigned int, efx_get_uint_stat)
88 89
90#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
91 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
92 unsigned int, efx_get_uint_stat)
93
89static struct efx_ethtool_stat efx_ethtool_stats[] = { 94static struct efx_ethtool_stat efx_ethtool_stats[] = {
90 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), 95 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
91 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), 96 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
@@ -116,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), 121 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), 122 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), 123 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
124 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
125 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
126 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
127 EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
119 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), 128 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), 129 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), 130 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
@@ -237,8 +246,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
237 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 246 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
238 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 247 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
239 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 248 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
240 siena_print_fwver(efx, info->fw_version, 249 efx_mcdi_print_fwver(efx, info->fw_version,
241 sizeof(info->fw_version)); 250 sizeof(info->fw_version));
242 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 251 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
243} 252}
244 253
@@ -470,6 +479,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
470 struct efx_mac_stats *mac_stats = &efx->mac_stats; 479 struct efx_mac_stats *mac_stats = &efx->mac_stats;
471 struct efx_ethtool_stat *stat; 480 struct efx_ethtool_stat *stat;
472 struct efx_channel *channel; 481 struct efx_channel *channel;
482 struct efx_tx_queue *tx_queue;
473 struct rtnl_link_stats64 temp; 483 struct rtnl_link_stats64 temp;
474 int i; 484 int i;
475 485
@@ -495,6 +505,15 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
495 data[i] += stat->get_stat((void *)channel + 505 data[i] += stat->get_stat((void *)channel +
496 stat->offset); 506 stat->offset);
497 break; 507 break;
508 case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
509 data[i] = 0;
510 efx_for_each_channel(channel, efx) {
511 efx_for_each_channel_tx_queue(tx_queue, channel)
512 data[i] +=
513 stat->get_stat((void *)tx_queue
514 + stat->offset);
515 }
516 break;
498 } 517 }
499 } 518 }
500} 519}
@@ -502,7 +521,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
502static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) 521static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
503{ 522{
504 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); 523 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
505 unsigned long features; 524 u32 features;
506 525
507 features = NETIF_F_TSO; 526 features = NETIF_F_TSO;
508 if (efx->type->offload_features & NETIF_F_V6_CSUM) 527 if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +538,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
519static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) 538static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
520{ 539{
521 struct efx_nic *efx = netdev_priv(net_dev); 540 struct efx_nic *efx = netdev_priv(net_dev);
522 unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; 541 u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
523 542
524 if (enable) 543 if (enable)
525 net_dev->features |= features; 544 net_dev->features |= features;
@@ -635,7 +654,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
635 /* Find lowest IRQ moderation across all used TX queues */ 654 /* Find lowest IRQ moderation across all used TX queues */
636 coalesce->tx_coalesce_usecs_irq = ~((u32) 0); 655 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
637 efx_for_each_channel(channel, efx) { 656 efx_for_each_channel(channel, efx) {
638 if (!efx_channel_get_tx_queue(channel, 0)) 657 if (!efx_channel_has_tx_queues(channel))
639 continue; 658 continue;
640 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { 659 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
641 if (channel->channel < efx->n_rx_channels) 660 if (channel->channel < efx->n_rx_channels)
@@ -680,8 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
680 699
681 /* If the channel is shared only allow RX parameters to be set */ 700 /* If the channel is shared only allow RX parameters to be set */
682 efx_for_each_channel(channel, efx) { 701 efx_for_each_channel(channel, efx) {
683 if (efx_channel_get_rx_queue(channel) && 702 if (efx_channel_has_rx_queue(channel) &&
684 efx_channel_get_tx_queue(channel, 0) && 703 efx_channel_has_tx_queues(channel) &&
685 tx_usecs) { 704 tx_usecs) {
686 netif_err(efx, drv, efx->net_dev, "Channel is shared. " 705 netif_err(efx, drv, efx->net_dev, "Channel is shared. "
687 "Only RX coalescing may be set\n"); 706 "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 61ddd2c6e750..734fcfb52e85 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -1478,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
1478 /* RX control FIFO thresholds (32 entries) */ 1478 /* RX control FIFO thresholds (32 entries) */
1479 const unsigned ctrl_xon_thr = 20; 1479 const unsigned ctrl_xon_thr = 20;
1480 const unsigned ctrl_xoff_thr = 25; 1480 const unsigned ctrl_xoff_thr = 25;
1481 /* RX data FIFO thresholds (256-byte units; size varies) */
1482 int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
1483 int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
1484 efx_oword_t reg; 1481 efx_oword_t reg;
1485 1482
1486 efx_reado(efx, &reg, FR_AZ_RX_CFG); 1483 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1487 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { 1484 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1488 /* Data FIFO size is 5.5K */ 1485 /* Data FIFO size is 5.5K */
1489 if (data_xon_thr < 0)
1490 data_xon_thr = 512 >> 8;
1491 if (data_xoff_thr < 0)
1492 data_xoff_thr = 2048 >> 8;
1493 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); 1486 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
1494 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, 1487 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
1495 huge_buf_size); 1488 huge_buf_size);
1496 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr); 1489 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
1497 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr); 1490 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
1498 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); 1491 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
1499 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); 1492 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
1500 } else { 1493 } else {
1501 /* Data FIFO size is 80K; register fields moved */ 1494 /* Data FIFO size is 80K; register fields moved */
1502 if (data_xon_thr < 0)
1503 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
1504 if (data_xoff_thr < 0)
1505 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
1506 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); 1495 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
1507 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, 1496 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
1508 huge_buf_size); 1497 huge_buf_size);
1509 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr); 1498 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
1510 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr); 1499 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
1500 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
1511 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); 1501 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
1512 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); 1502 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
1513 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); 1503 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 2dd16f0b3ced..b9cc846811d6 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b49e84394641..2c9ee5db3bf7 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index d4722c41c4ce..95a980fd63d5 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/in.h> 10#include <linux/in.h>
11#include <net/ip.h>
11#include "efx.h" 12#include "efx.h"
12#include "filter.h" 13#include "filter.h"
13#include "io.h" 14#include "io.h"
@@ -27,6 +28,10 @@
27 */ 28 */
28#define FILTER_CTL_SRCH_MAX 200 29#define FILTER_CTL_SRCH_MAX 200
29 30
31/* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33#define FILTER_CTL_SRCH_HINT_MAX 5
34
30enum efx_filter_table_id { 35enum efx_filter_table_id {
31 EFX_FILTER_TABLE_RX_IP = 0, 36 EFX_FILTER_TABLE_RX_IP = 0,
32 EFX_FILTER_TABLE_RX_MAC, 37 EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@ struct efx_filter_table {
47struct efx_filter_state { 52struct efx_filter_state {
48 spinlock_t lock; 53 spinlock_t lock;
49 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; 54 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
55#ifdef CONFIG_RFS_ACCEL
56 u32 *rps_flow_id;
57 unsigned rps_expire_index;
58#endif
50}; 59};
51 60
52/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 61/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
325 struct efx_filter_spec *spec, u32 key, 334 struct efx_filter_spec *spec, u32 key,
326 bool for_insert, int *depth_required) 335 bool for_insert, int *depth_required)
327{ 336{
328 unsigned hash, incr, filter_idx, depth; 337 unsigned hash, incr, filter_idx, depth, depth_max;
329 struct efx_filter_spec *cmp; 338 struct efx_filter_spec *cmp;
330 339
331 hash = efx_filter_hash(key); 340 hash = efx_filter_hash(key);
332 incr = efx_filter_increment(key); 341 incr = efx_filter_increment(key);
342 depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
343 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
333 344
334 for (depth = 1, filter_idx = hash & (table->size - 1); 345 for (depth = 1, filter_idx = hash & (table->size - 1);
335 depth <= FILTER_CTL_SRCH_MAX && 346 depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
336 test_bit(filter_idx, table->used_bitmap);
337 ++depth) { 347 ++depth) {
338 cmp = &table->spec[filter_idx]; 348 cmp = &table->spec[filter_idx];
339 if (efx_filter_equal(spec, cmp)) 349 if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table,
342 } 352 }
343 if (!for_insert) 353 if (!for_insert)
344 return -ENOENT; 354 return -ENOENT;
345 if (depth > FILTER_CTL_SRCH_MAX) 355 if (depth > depth_max)
346 return -EBUSY; 356 return -EBUSY;
347found: 357found:
348 *depth_required = depth; 358 *depth_required = depth;
@@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx)
562 spin_lock_init(&state->lock); 572 spin_lock_init(&state->lock);
563 573
564 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 574 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
575#ifdef CONFIG_RFS_ACCEL
576 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
577 sizeof(*state->rps_flow_id),
578 GFP_KERNEL);
579 if (!state->rps_flow_id)
580 goto fail;
581#endif
565 table = &state->table[EFX_FILTER_TABLE_RX_IP]; 582 table = &state->table[EFX_FILTER_TABLE_RX_IP];
566 table->id = EFX_FILTER_TABLE_RX_IP; 583 table->id = EFX_FILTER_TABLE_RX_IP;
567 table->offset = FR_BZ_RX_FILTER_TBL0; 584 table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
607 kfree(state->table[table_id].used_bitmap); 624 kfree(state->table[table_id].used_bitmap);
608 vfree(state->table[table_id].spec); 625 vfree(state->table[table_id].spec);
609 } 626 }
627#ifdef CONFIG_RFS_ACCEL
628 kfree(state->rps_flow_id);
629#endif
610 kfree(state); 630 kfree(state);
611} 631}
632
633#ifdef CONFIG_RFS_ACCEL
634
635int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
636 u16 rxq_index, u32 flow_id)
637{
638 struct efx_nic *efx = netdev_priv(net_dev);
639 struct efx_channel *channel;
640 struct efx_filter_state *state = efx->filter_state;
641 struct efx_filter_spec spec;
642 const struct iphdr *ip;
643 const __be16 *ports;
644 int nhoff;
645 int rc;
646
647 nhoff = skb_network_offset(skb);
648
649 if (skb->protocol != htons(ETH_P_IP))
650 return -EPROTONOSUPPORT;
651
652 /* RFS must validate the IP header length before calling us */
653 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
654 ip = (const struct iphdr *)(skb->data + nhoff);
655 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
656 return -EPROTONOSUPPORT;
657 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
658 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
659
660 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
661 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
662 ip->daddr, ports[1], ip->saddr, ports[0]);
663 if (rc)
664 return rc;
665
666 rc = efx_filter_insert_filter(efx, &spec, true);
667 if (rc < 0)
668 return rc;
669
670 /* Remember this so we can check whether to expire the filter later */
671 state->rps_flow_id[rc] = flow_id;
672 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
673 ++channel->rfs_filters_added;
674
675 netif_info(efx, rx_status, efx->net_dev,
676 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
677 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
678 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
679 rxq_index, flow_id, rc);
680
681 return rc;
682}
683
684bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
685{
686 struct efx_filter_state *state = efx->filter_state;
687 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
688 unsigned mask = table->size - 1;
689 unsigned index;
690 unsigned stop;
691
692 if (!spin_trylock_bh(&state->lock))
693 return false;
694
695 index = state->rps_expire_index;
696 stop = (index + quota) & mask;
697
698 while (index != stop) {
699 if (test_bit(index, table->used_bitmap) &&
700 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
701 rps_may_expire_flow(efx->net_dev,
702 table->spec[index].dmaq_id,
703 state->rps_flow_id[index], index)) {
704 netif_info(efx, rx_status, efx->net_dev,
705 "expiring filter %d [flow %u]\n",
706 index, state->rps_flow_id[index]);
707 efx_filter_table_clear_entry(efx, table, index);
708 }
709 index = (index + 1) & mask;
710 }
711
712 state->rps_expire_index = stop;
713 if (table->used == 0)
714 efx_filter_table_reset_search_depth(table);
715
716 spin_unlock_bh(&state->lock);
717 return true;
718}
719
720#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 6da4ae20a039..d9d8c2ef1074 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -48,9 +48,9 @@
48 * replacing the low 96 bits with zero does not affect functionality. 48 * replacing the low 96 bits with zero does not affect functionality.
49 * - If the host writes to the last dword address of such a register 49 * - If the host writes to the last dword address of such a register
50 * (i.e. the high 32 bits) the underlying register will always be 50 * (i.e. the high 32 bits) the underlying register will always be
51 * written. If the collector does not hold values for the low 96 51 * written. If the collector and the current write together do not
52 * bits of the register, they will be written as zero. Writing to 52 * provide values for all 128 bits of the register, the low 96 bits
53 * the last qword does not have this effect and must not be done. 53 * will be written as zero.
54 * - If the host writes to the address of any other part of such a 54 * - If the host writes to the address of any other part of such a
55 * register while the collector already holds values for some other 55 * register while the collector already holds values for some other
56 * register, the write is discarded and the collector maintains its 56 * register, the write is discarded and the collector maintains its
@@ -103,6 +103,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
103 _efx_writed(efx, value->u32[2], reg + 8); 103 _efx_writed(efx, value->u32[2], reg + 8);
104 _efx_writed(efx, value->u32[3], reg + 12); 104 _efx_writed(efx, value->u32[3], reg + 12);
105#endif 105#endif
106 wmb();
106 mmiowb(); 107 mmiowb();
107 spin_unlock_irqrestore(&efx->biu_lock, flags); 108 spin_unlock_irqrestore(&efx->biu_lock, flags);
108} 109}
@@ -125,6 +126,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
125 __raw_writel((__force u32)value->u32[0], membase + addr); 126 __raw_writel((__force u32)value->u32[0], membase + addr);
126 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 127 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
127#endif 128#endif
129 wmb();
128 mmiowb(); 130 mmiowb();
129 spin_unlock_irqrestore(&efx->biu_lock, flags); 131 spin_unlock_irqrestore(&efx->biu_lock, flags);
130} 132}
@@ -139,6 +141,7 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
139 141
140 /* No lock required */ 142 /* No lock required */
141 _efx_writed(efx, value->u32[0], reg); 143 _efx_writed(efx, value->u32[0], reg);
144 wmb();
142} 145}
143 146
144/* Read a 128-bit CSR, locking as appropriate. */ 147/* Read a 128-bit CSR, locking as appropriate. */
@@ -237,12 +240,14 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
237 240
238#ifdef EFX_USE_QWORD_IO 241#ifdef EFX_USE_QWORD_IO
239 _efx_writeq(efx, value->u64[0], reg + 0); 242 _efx_writeq(efx, value->u64[0], reg + 0);
243 _efx_writeq(efx, value->u64[1], reg + 8);
240#else 244#else
241 _efx_writed(efx, value->u32[0], reg + 0); 245 _efx_writed(efx, value->u32[0], reg + 0);
242 _efx_writed(efx, value->u32[1], reg + 4); 246 _efx_writed(efx, value->u32[1], reg + 4);
243#endif
244 _efx_writed(efx, value->u32[2], reg + 8); 247 _efx_writed(efx, value->u32[2], reg + 8);
245 _efx_writed(efx, value->u32[3], reg + 12); 248 _efx_writed(efx, value->u32[3], reg + 12);
249#endif
250 wmb();
246} 251}
247#define efx_writeo_page(efx, value, reg, page) \ 252#define efx_writeo_page(efx, value, reg, page) \
248 _efx_writeo_page(efx, value, \ 253 _efx_writeo_page(efx, value, \
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index b716e827b291..5e118f0d2479 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc. 3 * Copyright 2008-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -94,14 +94,15 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
94 94
95 efx_writed(efx, &hdr, pdu); 95 efx_writed(efx, &hdr, pdu);
96 96
97 for (i = 0; i < inlen; i += 4) 97 for (i = 0; i < inlen; i += 4) {
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
99 99 /* use wmb() within loop to inhibit write combining */
100 /* Ensure the payload is written out before the header */ 100 wmb();
101 wmb(); 101 }
102 102
103 /* ring the doorbell with a distinctive value */ 103 /* ring the doorbell with a distinctive value */
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
105 wmb();
105} 106}
106 107
107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 108static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
@@ -602,7 +603,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
602 ************************************************************************** 603 **************************************************************************
603 */ 604 */
604 605
605int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) 606void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
606{ 607{
607 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; 608 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
608 size_t outlength; 609 size_t outlength;
@@ -616,29 +617,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
616 if (rc) 617 if (rc)
617 goto fail; 618 goto fail;
618 619
619 if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
620 *version = 0;
621 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
622 return 0;
623 }
624
625 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { 620 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
626 rc = -EIO; 621 rc = -EIO;
627 goto fail; 622 goto fail;
628 } 623 }
629 624
630 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 625 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
631 *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | 626 snprintf(buf, len, "%u.%u.%u.%u",
632 ((u64)le16_to_cpu(ver_words[1]) << 32) | 627 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
633 ((u64)le16_to_cpu(ver_words[2]) << 16) | 628 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
634 le16_to_cpu(ver_words[3])); 629 return;
635 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
636
637 return 0;
638 630
639fail: 631fail:
640 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 632 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
641 return rc; 633 buf[0] = 0;
642} 634}
643 635
644int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 636int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index c792f1d65e48..aced2a7856fc 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc. 3 * Copyright 2008-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
93#define MCDI_EVENT_FIELD(_ev, _field) \ 93#define MCDI_EVENT_FIELD(_ev, _field) \
94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
95 95
96extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build); 96extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
98 bool *was_attached_out); 98 bool *was_attached_out);
99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index f88f4bf986ff..33f7294edb47 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 90359e644006..b86a15f221ad 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e97eed663c6..ec3f740f5465 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 56b0266b441f..19e68c26d103 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -51,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
51 return spins ? spins : -ETIMEDOUT; 51 return spins ? spins : -ETIMEDOUT;
52} 52}
53 53
54static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) 54static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
55{ 55{
56 int status; 56 int status;
57 57
58 if (LOOPBACK_INTERNAL(efx))
59 return 0;
60
61 if (mmd != MDIO_MMD_AN) { 58 if (mmd != MDIO_MMD_AN) {
62 /* Read MMD STATUS2 to check it is responding. */ 59 /* Read MMD STATUS2 to check it is responding. */
63 status = efx_mdio_read(efx, mmd, MDIO_STAT2); 60 status = efx_mdio_read(efx, mmd, MDIO_STAT2);
@@ -68,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
68 } 65 }
69 } 66 }
70 67
71 /* Read MMD STATUS 1 to check for fault. */
72 status = efx_mdio_read(efx, mmd, MDIO_STAT1);
73 if (status & MDIO_STAT1_FAULT) {
74 if (fault_fatal) {
75 netif_err(efx, hw, efx->net_dev,
76 "PHY MMD %d reporting fatal"
77 " fault: status %x\n", mmd, status);
78 return -EIO;
79 } else {
80 netif_dbg(efx, hw, efx->net_dev,
81 "PHY MMD %d reporting status"
82 " %x (expected)\n", mmd, status);
83 }
84 }
85 return 0; 68 return 0;
86} 69}
87 70
@@ -130,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
130 return rc; 113 return rc;
131} 114}
132 115
133int efx_mdio_check_mmds(struct efx_nic *efx, 116int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
134 unsigned int mmd_mask, unsigned int fatal_mask)
135{ 117{
136 int mmd = 0, probe_mmd, devs1, devs2; 118 int mmd = 0, probe_mmd, devs1, devs2;
137 u32 devices; 119 u32 devices;
@@ -161,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
161 143
162 /* Check all required MMDs are responding and happy. */ 144 /* Check all required MMDs are responding and happy. */
163 while (mmd_mask) { 145 while (mmd_mask) {
164 if (mmd_mask & 1) { 146 if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
165 int fault_fatal = fatal_mask & 1; 147 return -EIO;
166 if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
167 return -EIO;
168 }
169 mmd_mask = mmd_mask >> 1; 148 mmd_mask = mmd_mask >> 1;
170 fatal_mask = fatal_mask >> 1;
171 mmd++; 149 mmd++;
172 } 150 }
173 151
@@ -337,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
337 "no MDIO PHY present with ID %d\n", efx->mdio.prtad); 315 "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
338 rc = -EINVAL; 316 rc = -EINVAL;
339 } else { 317 } else {
340 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); 318 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
341 } 319 }
342 320
343 mutex_unlock(&efx->mac_lock); 321 mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 75791d3d4963..df0703940c83 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
68 int spins, int spintime); 68 int spins, int spintime);
69 69
70/* As efx_mdio_check_mmd but for multiple MMDs */ 70/* As efx_mdio_check_mmd but for multiple MMDs */
71int efx_mdio_check_mmds(struct efx_nic *efx, 71int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
72 unsigned int mmd_mask, unsigned int fatal_mask);
73 72
74/* Check the link status of specified mmds in bit mask */ 73/* Check the link status of specified mmds in bit mask */
75extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask); 74extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index d38627448c22..e646bfce2d84 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 28df8665256a..215d5c51bfa0 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -41,7 +41,7 @@
41 * 41 *
42 **************************************************************************/ 42 **************************************************************************/
43 43
44#define EFX_DRIVER_VERSION "3.0" 44#define EFX_DRIVER_VERSION "3.1"
45 45
46#ifdef EFX_ENABLE_DEBUG 46#ifdef EFX_ENABLE_DEBUG
47#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 47#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -63,10 +63,12 @@
63/* Checksum generation is a per-queue option in hardware, so each 63/* Checksum generation is a per-queue option in hardware, so each
64 * queue visible to the networking core is backed by two hardware TX 64 * queue visible to the networking core is backed by two hardware TX
65 * queues. */ 65 * queues. */
66#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS 66#define EFX_MAX_TX_TC 2
67#define EFX_TXQ_TYPE_OFFLOAD 1 67#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
68#define EFX_TXQ_TYPES 2 68#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
69#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) 69#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
70#define EFX_TXQ_TYPES 4
71#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
70 72
71/** 73/**
72 * struct efx_special_buffer - An Efx special buffer 74 * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
140 * @buffer: The software buffer ring 142 * @buffer: The software buffer ring
141 * @txd: The hardware descriptor ring 143 * @txd: The hardware descriptor ring
142 * @ptr_mask: The size of the ring minus 1. 144 * @ptr_mask: The size of the ring minus 1.
145 * @initialised: Has hardware queue been initialised?
143 * @flushed: Used when handling queue flushing 146 * @flushed: Used when handling queue flushing
144 * @read_count: Current read pointer. 147 * @read_count: Current read pointer.
145 * This is the number of buffers that have been removed from both rings. 148 * This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
182 struct efx_tx_buffer *buffer; 185 struct efx_tx_buffer *buffer;
183 struct efx_special_buffer txd; 186 struct efx_special_buffer txd;
184 unsigned int ptr_mask; 187 unsigned int ptr_mask;
188 bool initialised;
185 enum efx_flush_state flushed; 189 enum efx_flush_state flushed;
186 190
187 /* Members used mainly on the completion path */ 191 /* Members used mainly on the completion path */
@@ -210,15 +214,17 @@ struct efx_tx_queue {
210 * If both this and page are %NULL, the buffer slot is currently free. 214 * If both this and page are %NULL, the buffer slot is currently free.
211 * @page: The associated page buffer, if any. 215 * @page: The associated page buffer, if any.
212 * If both this and skb are %NULL, the buffer slot is currently free. 216 * If both this and skb are %NULL, the buffer slot is currently free.
213 * @data: Pointer to ethernet header
214 * @len: Buffer length, in bytes. 217 * @len: Buffer length, in bytes.
218 * @is_page: Indicates if @page is valid. If false, @skb is valid.
215 */ 219 */
216struct efx_rx_buffer { 220struct efx_rx_buffer {
217 dma_addr_t dma_addr; 221 dma_addr_t dma_addr;
218 struct sk_buff *skb; 222 union {
219 struct page *page; 223 struct sk_buff *skb;
220 char *data; 224 struct page *page;
225 } u;
221 unsigned int len; 226 unsigned int len;
227 bool is_page;
222}; 228};
223 229
224/** 230/**
@@ -358,6 +364,9 @@ struct efx_channel {
358 364
359 unsigned int irq_count; 365 unsigned int irq_count;
360 unsigned int irq_mod_score; 366 unsigned int irq_mod_score;
367#ifdef CONFIG_RFS_ACCEL
368 unsigned int rfs_filters_added;
369#endif
361 370
362 int rx_alloc_level; 371 int rx_alloc_level;
363 int rx_alloc_push_pages; 372 int rx_alloc_push_pages;
@@ -377,7 +386,7 @@ struct efx_channel {
377 bool rx_pkt_csummed; 386 bool rx_pkt_csummed;
378 387
379 struct efx_rx_queue rx_queue; 388 struct efx_rx_queue rx_queue;
380 struct efx_tx_queue tx_queue[2]; 389 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
381}; 390};
382 391
383enum efx_led_mode { 392enum efx_led_mode {
@@ -906,7 +915,7 @@ struct efx_nic_type {
906 unsigned int phys_addr_channels; 915 unsigned int phys_addr_channels;
907 unsigned int tx_dc_base; 916 unsigned int tx_dc_base;
908 unsigned int rx_dc_base; 917 unsigned int rx_dc_base;
909 unsigned long offload_features; 918 u32 offload_features;
910 u32 reset_world_flags; 919 u32 reset_world_flags;
911}; 920};
912 921
@@ -938,18 +947,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
938 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; 947 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
939} 948}
940 949
950static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
951{
952 return channel->channel - channel->efx->tx_channel_offset <
953 channel->efx->n_tx_channels;
954}
955
941static inline struct efx_tx_queue * 956static inline struct efx_tx_queue *
942efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) 957efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
943{ 958{
944 struct efx_tx_queue *tx_queue = channel->tx_queue; 959 EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
945 EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES); 960 type >= EFX_TXQ_TYPES);
946 return tx_queue->channel ? tx_queue + type : NULL; 961 return &channel->tx_queue[type];
962}
963
964static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
965{
966 return !(tx_queue->efx->net_dev->num_tc < 2 &&
967 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
947} 968}
948 969
949/* Iterate over all TX queues belonging to a channel */ 970/* Iterate over all TX queues belonging to a channel */
950#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 971#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
951 for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \ 972 if (!efx_channel_has_tx_queues(_channel)) \
952 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ 973 ; \
974 else \
975 for (_tx_queue = (_channel)->tx_queue; \
976 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
977 efx_tx_queue_used(_tx_queue); \
978 _tx_queue++)
979
980/* Iterate over all possible TX queues belonging to a channel */
981#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
982 for (_tx_queue = (_channel)->tx_queue; \
983 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
953 _tx_queue++) 984 _tx_queue++)
954 985
955static inline struct efx_rx_queue * 986static inline struct efx_rx_queue *
@@ -959,18 +990,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
959 return &efx->channel[index]->rx_queue; 990 return &efx->channel[index]->rx_queue;
960} 991}
961 992
993static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
994{
995 return channel->channel < channel->efx->n_rx_channels;
996}
997
962static inline struct efx_rx_queue * 998static inline struct efx_rx_queue *
963efx_channel_get_rx_queue(struct efx_channel *channel) 999efx_channel_get_rx_queue(struct efx_channel *channel)
964{ 1000{
965 return channel->channel < channel->efx->n_rx_channels ? 1001 EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
966 &channel->rx_queue : NULL; 1002 return &channel->rx_queue;
967} 1003}
968 1004
969/* Iterate over all RX queues belonging to a channel */ 1005/* Iterate over all RX queues belonging to a channel */
970#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 1006#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
971 for (_rx_queue = efx_channel_get_rx_queue(channel); \ 1007 if (!efx_channel_has_rx_queue(_channel)) \
972 _rx_queue; \ 1008 ; \
973 _rx_queue = NULL) 1009 else \
1010 for (_rx_queue = &(_channel)->rx_queue; \
1011 _rx_queue; \
1012 _rx_queue = NULL)
974 1013
975static inline struct efx_channel * 1014static inline struct efx_channel *
976efx_rx_queue_channel(struct efx_rx_queue *rx_queue) 1015efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da386599ab68..e8396614daf3 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -41,26 +41,6 @@
41#define RX_DC_ENTRIES 64 41#define RX_DC_ENTRIES 64
42#define RX_DC_ENTRIES_ORDER 3 42#define RX_DC_ENTRIES_ORDER 3
43 43
44/* RX FIFO XOFF watermark
45 *
46 * When the amount of the RX FIFO increases used increases past this
47 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
48 * This also has an effect on RX/TX arbitration
49 */
50int efx_nic_rx_xoff_thresh = -1;
51module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
52MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
53
54/* RX FIFO XON watermark
55 *
56 * When the amount of the RX FIFO used decreases below this
57 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
58 * This also has an effect on RX/TX arbitration
59 */
60int efx_nic_rx_xon_thresh = -1;
61module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
62MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
63
64/* If EFX_MAX_INT_ERRORS internal errors occur within 44/* If EFX_MAX_INT_ERRORS internal errors occur within
65 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 45 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
66 * disable it. 46 * disable it.
@@ -445,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
445 425
446void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 426void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
447{ 427{
448 efx_oword_t tx_desc_ptr;
449 struct efx_nic *efx = tx_queue->efx; 428 struct efx_nic *efx = tx_queue->efx;
429 efx_oword_t reg;
450 430
451 tx_queue->flushed = FLUSH_NONE; 431 tx_queue->flushed = FLUSH_NONE;
452 432
@@ -454,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
454 efx_init_special_buffer(efx, &tx_queue->txd); 434 efx_init_special_buffer(efx, &tx_queue->txd);
455 435
456 /* Push TX descriptor ring to card */ 436 /* Push TX descriptor ring to card */
457 EFX_POPULATE_OWORD_10(tx_desc_ptr, 437 EFX_POPULATE_OWORD_10(reg,
458 FRF_AZ_TX_DESCQ_EN, 1, 438 FRF_AZ_TX_DESCQ_EN, 1,
459 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 439 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
460 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 440 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
470 450
471 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 451 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
472 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 452 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
473 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 453 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
474 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, 454 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
475 !csum); 455 !csum);
476 } 456 }
477 457
478 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 458 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
479 tx_queue->queue); 459 tx_queue->queue);
480 460
481 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 461 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
482 efx_oword_t reg;
483
484 /* Only 128 bits in this register */ 462 /* Only 128 bits in this register */
485 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 463 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
486 464
@@ -491,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
491 set_bit_le(tx_queue->queue, (void *)&reg); 469 set_bit_le(tx_queue->queue, (void *)&reg);
492 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); 470 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
493 } 471 }
472
473 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
474 EFX_POPULATE_OWORD_1(reg,
475 FRF_BZ_TX_PACE,
476 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
477 FFE_BZ_TX_PACE_OFF :
478 FFE_BZ_TX_PACE_RESERVED);
479 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
480 tx_queue->queue);
481 }
494} 482}
495 483
496static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 484static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1238 1226
1239 /* Flush all tx queues in parallel */ 1227 /* Flush all tx queues in parallel */
1240 efx_for_each_channel(channel, efx) { 1228 efx_for_each_channel(channel, efx) {
1241 efx_for_each_channel_tx_queue(tx_queue, channel) 1229 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1242 efx_flush_tx_queue(tx_queue); 1230 if (tx_queue->initialised)
1231 efx_flush_tx_queue(tx_queue);
1232 }
1243 } 1233 }
1244 1234
1245 /* The hardware supports four concurrent rx flushes, each of which may 1235 /* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1262 ++rx_pending; 1252 ++rx_pending;
1263 } 1253 }
1264 } 1254 }
1265 efx_for_each_channel_tx_queue(tx_queue, channel) { 1255 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1266 if (tx_queue->flushed != FLUSH_DONE) 1256 if (tx_queue->initialised &&
1257 tx_queue->flushed != FLUSH_DONE)
1267 ++tx_pending; 1258 ++tx_pending;
1268 } 1259 }
1269 } 1260 }
@@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1278 /* Mark the queues as all flushed. We're going to return failure 1269 /* Mark the queues as all flushed. We're going to return failure
1279 * leading to a reset, or fake up success anyway */ 1270 * leading to a reset, or fake up success anyway */
1280 efx_for_each_channel(channel, efx) { 1271 efx_for_each_channel(channel, efx) {
1281 efx_for_each_channel_tx_queue(tx_queue, channel) { 1272 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1282 if (tx_queue->flushed != FLUSH_DONE) 1273 if (tx_queue->initialised &&
1274 tx_queue->flushed != FLUSH_DONE)
1283 netif_err(efx, hw, efx->net_dev, 1275 netif_err(efx, hw, efx->net_dev,
1284 "tx queue %d flush command timed out\n", 1276 "tx queue %d flush command timed out\n",
1285 tx_queue->queue); 1277 tx_queue->queue);
@@ -1682,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx)
1682 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1674 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1683 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1675 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1684 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1676 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1677
1678 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1679 EFX_POPULATE_OWORD_4(temp,
1680 /* Default values */
1681 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1682 FRF_BZ_TX_PACE_SB_AF, 0xb,
1683 FRF_BZ_TX_PACE_FB_BASE, 0,
1684 /* Allow large pace values in the
1685 * fast bin. */
1686 FRF_BZ_TX_PACE_BIN_TH,
1687 FFE_BZ_TX_PACE_RESERVED);
1688 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1689 }
1685} 1690}
1686 1691
1687/* Register dump */ 1692/* Register dump */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index eb0586925b51..d9de1b647d41 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -142,20 +142,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
142 142
143/** 143/**
144 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
145 * @fw_version: Management controller firmware version
146 * @fw_build: Firmware build number
147 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
148 * @wol_filter_id: Wake-on-LAN packet filter id 146 * @wol_filter_id: Wake-on-LAN packet filter id
149 */ 147 */
150struct siena_nic_data { 148struct siena_nic_data {
151 u64 fw_version;
152 u32 fw_build;
153 struct efx_mcdi_iface mcdi; 149 struct efx_mcdi_iface mcdi;
154 int wol_filter_id; 150 int wol_filter_id;
155}; 151};
156 152
157extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
158
159extern struct efx_nic_type falcon_a1_nic_type; 153extern struct efx_nic_type falcon_a1_nic_type;
160extern struct efx_nic_type falcon_b0_nic_type; 154extern struct efx_nic_type falcon_b0_nic_type;
161extern struct efx_nic_type siena_a0_nic_type; 155extern struct efx_nic_type siena_a0_nic_type;
@@ -194,7 +188,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
194/* MAC/PHY */ 188/* MAC/PHY */
195extern void falcon_drain_tx_fifo(struct efx_nic *efx); 189extern void falcon_drain_tx_fifo(struct efx_nic *efx);
196extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 190extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
197extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
198 191
199/* Interrupts and test events */ 192/* Interrupts and test events */
200extern int efx_nic_init_interrupt(struct efx_nic *efx); 193extern int efx_nic_init_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 1dab609757fb..b3b79472421e 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index ea3ae0089315..55f90924247e 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed81c36..cc2c86b76a7b 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -2907,6 +2907,12 @@
2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44 2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16 2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
2909 2909
2910/* TX_PACE_TBL */
2911/* Values >20 are documented as reserved, but will result in a queue going
2912 * into the fast bin with a pace value of zero. */
2913#define FFE_BZ_TX_PACE_OFF 0
2914#define FFE_BZ_TX_PACE_RESERVED 21
2915
2910/* DRIVER_EV */ 2916/* DRIVER_EV */
2911/* Sub-fields of an RX flush completion event */ 2917/* Sub-fields of an RX flush completion event */
2912#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 2918#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 3925fd621177..c0fdb59030fb 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
89 */ 89 */
90#define EFX_RXD_HEAD_ROOM 2 90#define EFX_RXD_HEAD_ROOM 2
91 91
92static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) 92/* Offset of ethernet header within page */
93static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
94 struct efx_rx_buffer *buf)
93{ 95{
94 /* Offset is always within one page, so we don't need to consider 96 /* Offset is always within one page, so we don't need to consider
95 * the page order. 97 * the page order.
96 */ 98 */
97 return (__force unsigned long) buf->data & (PAGE_SIZE - 1); 99 return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
100 efx->type->rx_buffer_hash_size);
98} 101}
99static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 102static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
100{ 103{
101 return PAGE_SIZE << efx->rx_buffer_order; 104 return PAGE_SIZE << efx->rx_buffer_order;
102} 105}
103 106
104static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf) 107static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
105{ 108{
109 if (buf->is_page)
110 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
111 else
112 return ((u8 *)buf->u.skb->data +
113 efx->type->rx_buffer_hash_size);
114}
115
116static inline u32 efx_rx_buf_hash(const u8 *eh)
117{
118 /* The ethernet header is always directly after any hash. */
106#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 119#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
107 return __le32_to_cpup((const __le32 *)(buf->data - 4)); 120 return __le32_to_cpup((const __le32 *)(eh - 4));
108#else 121#else
109 const u8 *data = (const u8 *)(buf->data - 4); 122 const u8 *data = eh - 4;
110 return ((u32)data[0] | 123 return ((u32)data[0] |
111 (u32)data[1] << 8 | 124 (u32)data[1] << 8 |
112 (u32)data[2] << 16 | 125 (u32)data[2] << 16 |
@@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
129 struct efx_nic *efx = rx_queue->efx; 142 struct efx_nic *efx = rx_queue->efx;
130 struct net_device *net_dev = efx->net_dev; 143 struct net_device *net_dev = efx->net_dev;
131 struct efx_rx_buffer *rx_buf; 144 struct efx_rx_buffer *rx_buf;
145 struct sk_buff *skb;
132 int skb_len = efx->rx_buffer_len; 146 int skb_len = efx->rx_buffer_len;
133 unsigned index, count; 147 unsigned index, count;
134 148
@@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
136 index = rx_queue->added_count & rx_queue->ptr_mask; 150 index = rx_queue->added_count & rx_queue->ptr_mask;
137 rx_buf = efx_rx_buffer(rx_queue, index); 151 rx_buf = efx_rx_buffer(rx_queue, index);
138 152
139 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); 153 rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
140 if (unlikely(!rx_buf->skb)) 154 if (unlikely(!skb))
141 return -ENOMEM; 155 return -ENOMEM;
142 rx_buf->page = NULL;
143 156
144 /* Adjust the SKB for padding and checksum */ 157 /* Adjust the SKB for padding and checksum */
145 skb_reserve(rx_buf->skb, NET_IP_ALIGN); 158 skb_reserve(skb, NET_IP_ALIGN);
146 rx_buf->len = skb_len - NET_IP_ALIGN; 159 rx_buf->len = skb_len - NET_IP_ALIGN;
147 rx_buf->data = (char *)rx_buf->skb->data; 160 rx_buf->is_page = false;
148 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; 161 skb->ip_summed = CHECKSUM_UNNECESSARY;
149 162
150 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 163 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
151 rx_buf->data, rx_buf->len, 164 skb->data, rx_buf->len,
152 PCI_DMA_FROMDEVICE); 165 PCI_DMA_FROMDEVICE);
153 if (unlikely(pci_dma_mapping_error(efx->pci_dev, 166 if (unlikely(pci_dma_mapping_error(efx->pci_dev,
154 rx_buf->dma_addr))) { 167 rx_buf->dma_addr))) {
155 dev_kfree_skb_any(rx_buf->skb); 168 dev_kfree_skb_any(skb);
156 rx_buf->skb = NULL; 169 rx_buf->u.skb = NULL;
157 return -EIO; 170 return -EIO;
158 } 171 }
159 172
@@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
211 index = rx_queue->added_count & rx_queue->ptr_mask; 224 index = rx_queue->added_count & rx_queue->ptr_mask;
212 rx_buf = efx_rx_buffer(rx_queue, index); 225 rx_buf = efx_rx_buffer(rx_queue, index);
213 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 226 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
214 rx_buf->skb = NULL; 227 rx_buf->u.page = page;
215 rx_buf->page = page;
216 rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
217 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 228 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
229 rx_buf->is_page = true;
218 ++rx_queue->added_count; 230 ++rx_queue->added_count;
219 ++rx_queue->alloc_page_count; 231 ++rx_queue->alloc_page_count;
220 ++state->refcnt; 232 ++state->refcnt;
@@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
235static void efx_unmap_rx_buffer(struct efx_nic *efx, 247static void efx_unmap_rx_buffer(struct efx_nic *efx,
236 struct efx_rx_buffer *rx_buf) 248 struct efx_rx_buffer *rx_buf)
237{ 249{
238 if (rx_buf->page) { 250 if (rx_buf->is_page && rx_buf->u.page) {
239 struct efx_rx_page_state *state; 251 struct efx_rx_page_state *state;
240 252
241 EFX_BUG_ON_PARANOID(rx_buf->skb); 253 state = page_address(rx_buf->u.page);
242
243 state = page_address(rx_buf->page);
244 if (--state->refcnt == 0) { 254 if (--state->refcnt == 0) {
245 pci_unmap_page(efx->pci_dev, 255 pci_unmap_page(efx->pci_dev,
246 state->dma_addr, 256 state->dma_addr,
247 efx_rx_buf_size(efx), 257 efx_rx_buf_size(efx),
248 PCI_DMA_FROMDEVICE); 258 PCI_DMA_FROMDEVICE);
249 } 259 }
250 } else if (likely(rx_buf->skb)) { 260 } else if (!rx_buf->is_page && rx_buf->u.skb) {
251 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 261 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
252 rx_buf->len, PCI_DMA_FROMDEVICE); 262 rx_buf->len, PCI_DMA_FROMDEVICE);
253 } 263 }
@@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
256static void efx_free_rx_buffer(struct efx_nic *efx, 266static void efx_free_rx_buffer(struct efx_nic *efx,
257 struct efx_rx_buffer *rx_buf) 267 struct efx_rx_buffer *rx_buf)
258{ 268{
259 if (rx_buf->page) { 269 if (rx_buf->is_page && rx_buf->u.page) {
260 __free_pages(rx_buf->page, efx->rx_buffer_order); 270 __free_pages(rx_buf->u.page, efx->rx_buffer_order);
261 rx_buf->page = NULL; 271 rx_buf->u.page = NULL;
262 } else if (likely(rx_buf->skb)) { 272 } else if (!rx_buf->is_page && rx_buf->u.skb) {
263 dev_kfree_skb_any(rx_buf->skb); 273 dev_kfree_skb_any(rx_buf->u.skb);
264 rx_buf->skb = NULL; 274 rx_buf->u.skb = NULL;
265 } 275 }
266} 276}
267 277
@@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
277static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, 287static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
278 struct efx_rx_buffer *rx_buf) 288 struct efx_rx_buffer *rx_buf)
279{ 289{
280 struct efx_rx_page_state *state = page_address(rx_buf->page); 290 struct efx_rx_page_state *state = page_address(rx_buf->u.page);
281 struct efx_rx_buffer *new_buf; 291 struct efx_rx_buffer *new_buf;
282 unsigned fill_level, index; 292 unsigned fill_level, index;
283 293
@@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
292 } 302 }
293 303
294 ++state->refcnt; 304 ++state->refcnt;
295 get_page(rx_buf->page); 305 get_page(rx_buf->u.page);
296 306
297 index = rx_queue->added_count & rx_queue->ptr_mask; 307 index = rx_queue->added_count & rx_queue->ptr_mask;
298 new_buf = efx_rx_buffer(rx_queue, index); 308 new_buf = efx_rx_buffer(rx_queue, index);
299 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 309 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
300 new_buf->skb = NULL; 310 new_buf->u.page = rx_buf->u.page;
301 new_buf->page = rx_buf->page;
302 new_buf->data = (void *)
303 ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
304 new_buf->len = rx_buf->len; 311 new_buf->len = rx_buf->len;
312 new_buf->is_page = true;
305 ++rx_queue->added_count; 313 ++rx_queue->added_count;
306} 314}
307 315
@@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
315 struct efx_rx_buffer *new_buf; 323 struct efx_rx_buffer *new_buf;
316 unsigned index; 324 unsigned index;
317 325
318 if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && 326 if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
319 page_count(rx_buf->page) == 1) 327 page_count(rx_buf->u.page) == 1)
320 efx_resurrect_rx_buffer(rx_queue, rx_buf); 328 efx_resurrect_rx_buffer(rx_queue, rx_buf);
321 329
322 index = rx_queue->added_count & rx_queue->ptr_mask; 330 index = rx_queue->added_count & rx_queue->ptr_mask;
323 new_buf = efx_rx_buffer(rx_queue, index); 331 new_buf = efx_rx_buffer(rx_queue, index);
324 332
325 memcpy(new_buf, rx_buf, sizeof(*new_buf)); 333 memcpy(new_buf, rx_buf, sizeof(*new_buf));
326 rx_buf->page = NULL; 334 rx_buf->u.page = NULL;
327 rx_buf->skb = NULL;
328 ++rx_queue->added_count; 335 ++rx_queue->added_count;
329} 336}
330 337
@@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
428 * data at the end of the skb will be trashed. So 435 * data at the end of the skb will be trashed. So
429 * we have no choice but to leak the fragment. 436 * we have no choice but to leak the fragment.
430 */ 437 */
431 *leak_packet = (rx_buf->skb != NULL); 438 *leak_packet = !rx_buf->is_page;
432 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 439 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
433 } else { 440 } else {
434 if (net_ratelimit()) 441 if (net_ratelimit())
@@ -448,19 +455,18 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
448 */ 455 */
449static void efx_rx_packet_gro(struct efx_channel *channel, 456static void efx_rx_packet_gro(struct efx_channel *channel,
450 struct efx_rx_buffer *rx_buf, 457 struct efx_rx_buffer *rx_buf,
451 bool checksummed) 458 const u8 *eh, bool checksummed)
452{ 459{
453 struct napi_struct *napi = &channel->napi_str; 460 struct napi_struct *napi = &channel->napi_str;
454 gro_result_t gro_result; 461 gro_result_t gro_result;
455 462
456 /* Pass the skb/page into the GRO engine */ 463 /* Pass the skb/page into the GRO engine */
457 if (rx_buf->page) { 464 if (rx_buf->is_page) {
458 struct efx_nic *efx = channel->efx; 465 struct efx_nic *efx = channel->efx;
459 struct page *page = rx_buf->page; 466 struct page *page = rx_buf->u.page;
460 struct sk_buff *skb; 467 struct sk_buff *skb;
461 468
462 EFX_BUG_ON_PARANOID(rx_buf->skb); 469 rx_buf->u.page = NULL;
463 rx_buf->page = NULL;
464 470
465 skb = napi_get_frags(napi); 471 skb = napi_get_frags(napi);
466 if (!skb) { 472 if (!skb) {
@@ -469,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
469 } 475 }
470 476
471 if (efx->net_dev->features & NETIF_F_RXHASH) 477 if (efx->net_dev->features & NETIF_F_RXHASH)
472 skb->rxhash = efx_rx_buf_hash(rx_buf); 478 skb->rxhash = efx_rx_buf_hash(eh);
473 479
474 skb_shinfo(skb)->frags[0].page = page; 480 skb_shinfo(skb)->frags[0].page = page;
475 skb_shinfo(skb)->frags[0].page_offset = 481 skb_shinfo(skb)->frags[0].page_offset =
476 efx_rx_buf_offset(rx_buf); 482 efx_rx_buf_offset(efx, rx_buf);
477 skb_shinfo(skb)->frags[0].size = rx_buf->len; 483 skb_shinfo(skb)->frags[0].size = rx_buf->len;
478 skb_shinfo(skb)->nr_frags = 1; 484 skb_shinfo(skb)->nr_frags = 1;
479 485
@@ -487,11 +493,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
487 493
488 gro_result = napi_gro_frags(napi); 494 gro_result = napi_gro_frags(napi);
489 } else { 495 } else {
490 struct sk_buff *skb = rx_buf->skb; 496 struct sk_buff *skb = rx_buf->u.skb;
491 497
492 EFX_BUG_ON_PARANOID(!skb);
493 EFX_BUG_ON_PARANOID(!checksummed); 498 EFX_BUG_ON_PARANOID(!checksummed);
494 rx_buf->skb = NULL; 499 rx_buf->u.skb = NULL;
495 500
496 gro_result = napi_gro_receive(napi, skb); 501 gro_result = napi_gro_receive(napi, skb);
497 } 502 }
@@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
513 bool leak_packet = false; 518 bool leak_packet = false;
514 519
515 rx_buf = efx_rx_buffer(rx_queue, index); 520 rx_buf = efx_rx_buffer(rx_queue, index);
516 EFX_BUG_ON_PARANOID(!rx_buf->data);
517 EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
518 EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
519 521
520 /* This allows the refill path to post another buffer. 522 /* This allows the refill path to post another buffer.
521 * EFX_RXD_HEAD_ROOM ensures that the slot we are using 523 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
554 /* Prefetch nice and early so data will (hopefully) be in cache by 556 /* Prefetch nice and early so data will (hopefully) be in cache by
555 * the time we look at it. 557 * the time we look at it.
556 */ 558 */
557 prefetch(rx_buf->data); 559 prefetch(efx_rx_buf_eh(efx, rx_buf));
558 560
559 /* Pipeline receives so that we give time for packet headers to be 561 /* Pipeline receives so that we give time for packet headers to be
560 * prefetched into cache. 562 * prefetched into cache.
561 */ 563 */
562 rx_buf->len = len; 564 rx_buf->len = len - efx->type->rx_buffer_hash_size;
563out: 565out:
564 if (channel->rx_pkt) 566 if (channel->rx_pkt)
565 __efx_rx_packet(channel, 567 __efx_rx_packet(channel,
@@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel,
574{ 576{
575 struct efx_nic *efx = channel->efx; 577 struct efx_nic *efx = channel->efx;
576 struct sk_buff *skb; 578 struct sk_buff *skb;
577 579 u8 *eh = efx_rx_buf_eh(efx, rx_buf);
578 rx_buf->data += efx->type->rx_buffer_hash_size;
579 rx_buf->len -= efx->type->rx_buffer_hash_size;
580 580
581 /* If we're in loopback test, then pass the packet directly to the 581 /* If we're in loopback test, then pass the packet directly to the
582 * loopback layer, and free the rx_buf here 582 * loopback layer, and free the rx_buf here
583 */ 583 */
584 if (unlikely(efx->loopback_selftest)) { 584 if (unlikely(efx->loopback_selftest)) {
585 efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); 585 efx_loopback_rx_packet(efx, eh, rx_buf->len);
586 efx_free_rx_buffer(efx, rx_buf); 586 efx_free_rx_buffer(efx, rx_buf);
587 return; 587 return;
588 } 588 }
589 589
590 if (rx_buf->skb) { 590 if (!rx_buf->is_page) {
591 prefetch(skb_shinfo(rx_buf->skb)); 591 skb = rx_buf->u.skb;
592 592
593 skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size); 593 prefetch(skb_shinfo(skb));
594 skb_put(rx_buf->skb, rx_buf->len); 594
595 skb_reserve(skb, efx->type->rx_buffer_hash_size);
596 skb_put(skb, rx_buf->len);
595 597
596 if (efx->net_dev->features & NETIF_F_RXHASH) 598 if (efx->net_dev->features & NETIF_F_RXHASH)
597 rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf); 599 skb->rxhash = efx_rx_buf_hash(eh);
598 600
599 /* Move past the ethernet header. rx_buf->data still points 601 /* Move past the ethernet header. rx_buf->data still points
600 * at the ethernet header */ 602 * at the ethernet header */
601 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, 603 skb->protocol = eth_type_trans(skb, efx->net_dev);
602 efx->net_dev);
603 604
604 skb_record_rx_queue(rx_buf->skb, channel->channel); 605 skb_record_rx_queue(skb, channel->channel);
605 } 606 }
606 607
607 if (likely(checksummed || rx_buf->page)) { 608 if (likely(checksummed || rx_buf->is_page)) {
608 efx_rx_packet_gro(channel, rx_buf, checksummed); 609 efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
609 return; 610 return;
610 } 611 }
611 612
612 /* We now own the SKB */ 613 /* We now own the SKB */
613 skb = rx_buf->skb; 614 skb = rx_buf->u.skb;
614 rx_buf->skb = NULL; 615 rx_buf->u.skb = NULL;
615 EFX_BUG_ON_PARANOID(!skb);
616 616
617 /* Set the SKB flags */ 617 /* Set the SKB flags */
618 skb_checksum_none_assert(skb); 618 skb_checksum_none_assert(skb);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99f1299..a0f49b348d62 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
644 goto out; 644 goto out;
645 } 645 }
646 646
647 /* Test both types of TX queue */ 647 /* Test all enabled types of TX queue */
648 efx_for_each_channel_tx_queue(tx_queue, channel) { 648 efx_for_each_channel_tx_queue(tx_queue, channel) {
649 state->offload_csum = (tx_queue->queue & 649 state->offload_csum = (tx_queue->queue &
650 EFX_TXQ_TYPE_OFFLOAD); 650 EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index aed495a4dad7..dba5456e70f3 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index bf8456176443..e4dd8986b1fe 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -227,13 +227,6 @@ static int siena_probe_nic(struct efx_nic *efx)
227 if (rc) 227 if (rc)
228 goto fail1; 228 goto fail1;
229 229
230 rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
231 if (rc) {
232 netif_err(efx, probe, efx->net_dev,
233 "Failed to read MCPU firmware version - rc %d\n", rc);
234 goto fail1; /* MCPU absent? */
235 }
236
237 /* Let the BMC know that the driver is now in charge of link and 230 /* Let the BMC know that the driver is now in charge of link and
238 * filter settings. We must do this before we reset the NIC */ 231 * filter settings. We must do this before we reset the NIC */
239 rc = efx_mcdi_drv_attach(efx, true, &already_attached); 232 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
@@ -348,11 +341,6 @@ static int siena_init_nic(struct efx_nic *efx)
348 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); 341 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
349 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); 342 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
350 343
351 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
352 /* No MCDI operation has been defined to set thresholds */
353 netif_err(efx, hw, efx->net_dev,
354 "ignoring RX flow control thresholds\n");
355
356 /* Enable event logging */ 344 /* Enable event logging */
357 rc = efx_mcdi_log_ctrl(efx, true, false, 0); 345 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
358 if (rc) 346 if (rc)
@@ -514,16 +502,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx)
514 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); 502 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
515} 503}
516 504
517void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
518{
519 struct siena_nic_data *nic_data = efx->nic_data;
520 snprintf(buf, len, "%u.%u.%u.%u",
521 (unsigned int)(nic_data->fw_version >> 48),
522 (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
523 (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
524 (unsigned int)(nic_data->fw_version & 0xffff));
525}
526
527/************************************************************************** 505/**************************************************************************
528 * 506 *
529 * Wake on LAN 507 * Wake on LAN
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 879b7f6bde3d..71f2e3ebe1c7 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd. 3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f102912eba91..efdceb35aaae 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -196,7 +196,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
196 if (rc < 0) 196 if (rc < 0)
197 return rc; 197 return rc;
198 198
199 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 199 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
200 if (rc < 0) 200 if (rc < 0)
201 return rc; 201 return rc;
202 } 202 }
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da657bf..139801908217 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
336{ 336{
337 struct efx_nic *efx = netdev_priv(net_dev); 337 struct efx_nic *efx = netdev_priv(net_dev);
338 struct efx_tx_queue *tx_queue; 338 struct efx_tx_queue *tx_queue;
339 unsigned index, type;
339 340
340 if (unlikely(efx->port_inhibited)) 341 if (unlikely(efx->port_inhibited))
341 return NETDEV_TX_BUSY; 342 return NETDEV_TX_BUSY;
342 343
343 tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb), 344 index = skb_get_queue_mapping(skb);
344 skb->ip_summed == CHECKSUM_PARTIAL ? 345 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
345 EFX_TXQ_TYPE_OFFLOAD : 0); 346 if (index >= efx->n_tx_channels) {
347 index -= efx->n_tx_channels;
348 type |= EFX_TXQ_TYPE_HIGHPRI;
349 }
350 tx_queue = efx_get_tx_queue(efx, index, type);
346 351
347 return efx_enqueue_skb(tx_queue, skb); 352 return efx_enqueue_skb(tx_queue, skb);
348} 353}
349 354
355void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
356{
357 struct efx_nic *efx = tx_queue->efx;
358
359 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
360 tx_queue->core_txq =
361 netdev_get_tx_queue(efx->net_dev,
362 tx_queue->queue / EFX_TXQ_TYPES +
363 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
364 efx->n_tx_channels : 0));
365}
366
367int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
368{
369 struct efx_nic *efx = netdev_priv(net_dev);
370 struct efx_channel *channel;
371 struct efx_tx_queue *tx_queue;
372 unsigned tc;
373 int rc;
374
375 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
376 return -EINVAL;
377
378 if (num_tc == net_dev->num_tc)
379 return 0;
380
381 for (tc = 0; tc < num_tc; tc++) {
382 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
383 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
384 }
385
386 if (num_tc > net_dev->num_tc) {
387 /* Initialise high-priority queues as necessary */
388 efx_for_each_channel(channel, efx) {
389 efx_for_each_possible_channel_tx_queue(tx_queue,
390 channel) {
391 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
392 continue;
393 if (!tx_queue->buffer) {
394 rc = efx_probe_tx_queue(tx_queue);
395 if (rc)
396 return rc;
397 }
398 if (!tx_queue->initialised)
399 efx_init_tx_queue(tx_queue);
400 efx_init_tx_queue_core_txq(tx_queue);
401 }
402 }
403 } else {
404 /* Reduce number of classes before number of queues */
405 net_dev->num_tc = num_tc;
406 }
407
408 rc = netif_set_real_num_tx_queues(net_dev,
409 max_t(int, num_tc, 1) *
410 efx->n_tx_channels);
411 if (rc)
412 return rc;
413
414 /* Do not destroy high-priority queues when they become
415 * unused. We would have to flush them first, and it is
416 * fairly difficult to flush a subset of TX queues. Leave
417 * it to efx_fini_channels().
418 */
419
420 net_dev->num_tc = num_tc;
421 return 0;
422}
423
350void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 424void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
351{ 425{
352 unsigned fill_level; 426 unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
430 504
431 /* Set up TX descriptor ring */ 505 /* Set up TX descriptor ring */
432 efx_nic_init_tx(tx_queue); 506 efx_nic_init_tx(tx_queue);
507
508 tx_queue->initialised = true;
433} 509}
434 510
435void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 511void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
452 528
453void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) 529void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
454{ 530{
531 if (!tx_queue->initialised)
532 return;
533
455 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 534 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
456 "shutting down TX queue %d\n", tx_queue->queue); 535 "shutting down TX queue %d\n", tx_queue->queue);
457 536
537 tx_queue->initialised = false;
538
458 /* Flush TX queue, remove descriptor ring */ 539 /* Flush TX queue, remove descriptor ring */
459 efx_nic_fini_tx(tx_queue); 540 efx_nic_fini_tx(tx_queue);
460 541
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
466 547
467void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 548void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
468{ 549{
550 if (!tx_queue->buffer)
551 return;
552
469 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 553 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
470 "destroying TX queue %d\n", tx_queue->queue); 554 "destroying TX queue %d\n", tx_queue->queue);
471 efx_nic_remove_tx(tx_queue); 555 efx_nic_remove_tx(tx_queue);
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
index 351794a79215..d9886addcc99 100644
--- a/drivers/net/sfc/txc43128_phy.c
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -193,7 +193,7 @@ static int txc_reset_phy(struct efx_nic *efx)
193 goto fail; 193 goto fail;
194 194
195 /* Check that all the MMDs we expect are present and responding. */ 195 /* Check that all the MMDs we expect are present and responding. */
196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0); 196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
197 if (rc < 0) 197 if (rc < 0)
198 goto fail; 198 goto fail;
199 199
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index e0d63083c3a8..e4dd3a7f304b 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 819c1750e2ab..e9e7a530552c 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,35 +32,40 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/ethtool.h>
35#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
36 37
37#include "sh_eth.h" 38#include "sh_eth.h"
38 39
40#define SH_ETH_DEF_MSG_ENABLE \
41 (NETIF_MSG_LINK | \
42 NETIF_MSG_TIMER | \
43 NETIF_MSG_RX_ERR| \
44 NETIF_MSG_TX_ERR)
45
39/* There is CPU dependent code */ 46/* There is CPU dependent code */
40#if defined(CONFIG_CPU_SUBTYPE_SH7724) 47#if defined(CONFIG_CPU_SUBTYPE_SH7724)
41#define SH_ETH_RESET_DEFAULT 1 48#define SH_ETH_RESET_DEFAULT 1
42static void sh_eth_set_duplex(struct net_device *ndev) 49static void sh_eth_set_duplex(struct net_device *ndev)
43{ 50{
44 struct sh_eth_private *mdp = netdev_priv(ndev); 51 struct sh_eth_private *mdp = netdev_priv(ndev);
45 u32 ioaddr = ndev->base_addr;
46 52
47 if (mdp->duplex) /* Full */ 53 if (mdp->duplex) /* Full */
48 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 54 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
49 else /* Half */ 55 else /* Half */
50 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 56 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
51} 57}
52 58
53static void sh_eth_set_rate(struct net_device *ndev) 59static void sh_eth_set_rate(struct net_device *ndev)
54{ 60{
55 struct sh_eth_private *mdp = netdev_priv(ndev); 61 struct sh_eth_private *mdp = netdev_priv(ndev);
56 u32 ioaddr = ndev->base_addr;
57 62
58 switch (mdp->speed) { 63 switch (mdp->speed) {
59 case 10: /* 10BASE */ 64 case 10: /* 10BASE */
60 writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR); 65 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
61 break; 66 break;
62 case 100:/* 100BASE */ 67 case 100:/* 100BASE */
63 writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR); 68 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
64 break; 69 break;
65 default: 70 default:
66 break; 71 break;
@@ -89,29 +94,28 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
89 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 94 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
90}; 95};
91#elif defined(CONFIG_CPU_SUBTYPE_SH7757) 96#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
92#define SH_ETH_RESET_DEFAULT 1 97#define SH_ETH_HAS_BOTH_MODULES 1
98#define SH_ETH_HAS_TSU 1
93static void sh_eth_set_duplex(struct net_device *ndev) 99static void sh_eth_set_duplex(struct net_device *ndev)
94{ 100{
95 struct sh_eth_private *mdp = netdev_priv(ndev); 101 struct sh_eth_private *mdp = netdev_priv(ndev);
96 u32 ioaddr = ndev->base_addr;
97 102
98 if (mdp->duplex) /* Full */ 103 if (mdp->duplex) /* Full */
99 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 104 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
100 else /* Half */ 105 else /* Half */
101 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 106 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
102} 107}
103 108
104static void sh_eth_set_rate(struct net_device *ndev) 109static void sh_eth_set_rate(struct net_device *ndev)
105{ 110{
106 struct sh_eth_private *mdp = netdev_priv(ndev); 111 struct sh_eth_private *mdp = netdev_priv(ndev);
107 u32 ioaddr = ndev->base_addr;
108 112
109 switch (mdp->speed) { 113 switch (mdp->speed) {
110 case 10: /* 10BASE */ 114 case 10: /* 10BASE */
111 writel(0, ioaddr + RTRATE); 115 sh_eth_write(ndev, 0, RTRATE);
112 break; 116 break;
113 case 100:/* 100BASE */ 117 case 100:/* 100BASE */
114 writel(1, ioaddr + RTRATE); 118 sh_eth_write(ndev, 1, RTRATE);
115 break; 119 break;
116 default: 120 default:
117 break; 121 break;
@@ -138,24 +142,154 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
138 .no_ade = 1, 142 .no_ade = 1,
139}; 143};
140 144
145#define SH_GIGA_ETH_BASE 0xfee00000
146#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
147#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
148static void sh_eth_chip_reset_giga(struct net_device *ndev)
149{
150 int i;
151 unsigned long mahr[2], malr[2];
152
153 /* save MAHR and MALR */
154 for (i = 0; i < 2; i++) {
155 malr[i] = readl(GIGA_MALR(i));
156 mahr[i] = readl(GIGA_MAHR(i));
157 }
158
159 /* reset device */
160 writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
161 mdelay(1);
162
163 /* restore MAHR and MALR */
164 for (i = 0; i < 2; i++) {
165 writel(malr[i], GIGA_MALR(i));
166 writel(mahr[i], GIGA_MAHR(i));
167 }
168}
169
170static int sh_eth_is_gether(struct sh_eth_private *mdp);
171static void sh_eth_reset(struct net_device *ndev)
172{
173 struct sh_eth_private *mdp = netdev_priv(ndev);
174 int cnt = 100;
175
176 if (sh_eth_is_gether(mdp)) {
177 sh_eth_write(ndev, 0x03, EDSR);
178 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
179 EDMR);
180 while (cnt > 0) {
181 if (!(sh_eth_read(ndev, EDMR) & 0x3))
182 break;
183 mdelay(1);
184 cnt--;
185 }
186 if (cnt < 0)
187 printk(KERN_ERR "Device reset fail\n");
188
189 /* Table Init */
190 sh_eth_write(ndev, 0x0, TDLAR);
191 sh_eth_write(ndev, 0x0, TDFAR);
192 sh_eth_write(ndev, 0x0, TDFXR);
193 sh_eth_write(ndev, 0x0, TDFFR);
194 sh_eth_write(ndev, 0x0, RDLAR);
195 sh_eth_write(ndev, 0x0, RDFAR);
196 sh_eth_write(ndev, 0x0, RDFXR);
197 sh_eth_write(ndev, 0x0, RDFFR);
198 } else {
199 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
200 EDMR);
201 mdelay(3);
202 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
203 EDMR);
204 }
205}
206
207static void sh_eth_set_duplex_giga(struct net_device *ndev)
208{
209 struct sh_eth_private *mdp = netdev_priv(ndev);
210
211 if (mdp->duplex) /* Full */
212 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
213 else /* Half */
214 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
215}
216
217static void sh_eth_set_rate_giga(struct net_device *ndev)
218{
219 struct sh_eth_private *mdp = netdev_priv(ndev);
220
221 switch (mdp->speed) {
222 case 10: /* 10BASE */
223 sh_eth_write(ndev, 0x00000000, GECMR);
224 break;
225 case 100:/* 100BASE */
226 sh_eth_write(ndev, 0x00000010, GECMR);
227 break;
228 case 1000: /* 1000BASE */
229 sh_eth_write(ndev, 0x00000020, GECMR);
230 break;
231 default:
232 break;
233 }
234}
235
236/* SH7757(GETHERC) */
237static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
238 .chip_reset = sh_eth_chip_reset_giga,
239 .set_duplex = sh_eth_set_duplex_giga,
240 .set_rate = sh_eth_set_rate_giga,
241
242 .ecsr_value = ECSR_ICD | ECSR_MPD,
243 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
244 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
245
246 .tx_check = EESR_TC1 | EESR_FTC,
247 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
248 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
249 EESR_ECI,
250 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
251 EESR_TFE,
252 .fdr_value = 0x0000072f,
253 .rmcr_value = 0x00000001,
254
255 .apr = 1,
256 .mpr = 1,
257 .tpauser = 1,
258 .bculr = 1,
259 .hw_swap = 1,
260 .rpadir = 1,
261 .rpadir_value = 2 << 16,
262 .no_trimd = 1,
263 .no_ade = 1,
264};
265
266static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
267{
268 if (sh_eth_is_gether(mdp))
269 return &sh_eth_my_cpu_data_giga;
270 else
271 return &sh_eth_my_cpu_data;
272}
273
141#elif defined(CONFIG_CPU_SUBTYPE_SH7763) 274#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
142#define SH_ETH_HAS_TSU 1 275#define SH_ETH_HAS_TSU 1
143static void sh_eth_chip_reset(struct net_device *ndev) 276static void sh_eth_chip_reset(struct net_device *ndev)
144{ 277{
278 struct sh_eth_private *mdp = netdev_priv(ndev);
279
145 /* reset device */ 280 /* reset device */
146 writel(ARSTR_ARSTR, ARSTR); 281 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
147 mdelay(1); 282 mdelay(1);
148} 283}
149 284
150static void sh_eth_reset(struct net_device *ndev) 285static void sh_eth_reset(struct net_device *ndev)
151{ 286{
152 u32 ioaddr = ndev->base_addr;
153 int cnt = 100; 287 int cnt = 100;
154 288
155 writel(EDSR_ENALL, ioaddr + EDSR); 289 sh_eth_write(ndev, EDSR_ENALL, EDSR);
156 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 290 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
157 while (cnt > 0) { 291 while (cnt > 0) {
158 if (!(readl(ioaddr + EDMR) & 0x3)) 292 if (!(sh_eth_read(ndev, EDMR) & 0x3))
159 break; 293 break;
160 mdelay(1); 294 mdelay(1);
161 cnt--; 295 cnt--;
@@ -164,41 +298,39 @@ static void sh_eth_reset(struct net_device *ndev)
164 printk(KERN_ERR "Device reset fail\n"); 298 printk(KERN_ERR "Device reset fail\n");
165 299
166 /* Table Init */ 300 /* Table Init */
167 writel(0x0, ioaddr + TDLAR); 301 sh_eth_write(ndev, 0x0, TDLAR);
168 writel(0x0, ioaddr + TDFAR); 302 sh_eth_write(ndev, 0x0, TDFAR);
169 writel(0x0, ioaddr + TDFXR); 303 sh_eth_write(ndev, 0x0, TDFXR);
170 writel(0x0, ioaddr + TDFFR); 304 sh_eth_write(ndev, 0x0, TDFFR);
171 writel(0x0, ioaddr + RDLAR); 305 sh_eth_write(ndev, 0x0, RDLAR);
172 writel(0x0, ioaddr + RDFAR); 306 sh_eth_write(ndev, 0x0, RDFAR);
173 writel(0x0, ioaddr + RDFXR); 307 sh_eth_write(ndev, 0x0, RDFXR);
174 writel(0x0, ioaddr + RDFFR); 308 sh_eth_write(ndev, 0x0, RDFFR);
175} 309}
176 310
177static void sh_eth_set_duplex(struct net_device *ndev) 311static void sh_eth_set_duplex(struct net_device *ndev)
178{ 312{
179 struct sh_eth_private *mdp = netdev_priv(ndev); 313 struct sh_eth_private *mdp = netdev_priv(ndev);
180 u32 ioaddr = ndev->base_addr;
181 314
182 if (mdp->duplex) /* Full */ 315 if (mdp->duplex) /* Full */
183 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 316 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
184 else /* Half */ 317 else /* Half */
185 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 318 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
186} 319}
187 320
188static void sh_eth_set_rate(struct net_device *ndev) 321static void sh_eth_set_rate(struct net_device *ndev)
189{ 322{
190 struct sh_eth_private *mdp = netdev_priv(ndev); 323 struct sh_eth_private *mdp = netdev_priv(ndev);
191 u32 ioaddr = ndev->base_addr;
192 324
193 switch (mdp->speed) { 325 switch (mdp->speed) {
194 case 10: /* 10BASE */ 326 case 10: /* 10BASE */
195 writel(GECMR_10, ioaddr + GECMR); 327 sh_eth_write(ndev, GECMR_10, GECMR);
196 break; 328 break;
197 case 100:/* 100BASE */ 329 case 100:/* 100BASE */
198 writel(GECMR_100, ioaddr + GECMR); 330 sh_eth_write(ndev, GECMR_100, GECMR);
199 break; 331 break;
200 case 1000: /* 1000BASE */ 332 case 1000: /* 1000BASE */
201 writel(GECMR_1000, ioaddr + GECMR); 333 sh_eth_write(ndev, GECMR_1000, GECMR);
202 break; 334 break;
203 default: 335 default:
204 break; 336 break;
@@ -229,6 +361,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
229 .hw_swap = 1, 361 .hw_swap = 1,
230 .no_trimd = 1, 362 .no_trimd = 1,
231 .no_ade = 1, 363 .no_ade = 1,
364 .tsu = 1,
232}; 365};
233 366
234#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 367#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -246,6 +379,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
246#define SH_ETH_HAS_TSU 1 379#define SH_ETH_HAS_TSU 1
247static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 380static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
248 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 381 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
382 .tsu = 1,
249}; 383};
250#endif 384#endif
251 385
@@ -281,11 +415,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
281/* Chip Reset */ 415/* Chip Reset */
282static void sh_eth_reset(struct net_device *ndev) 416static void sh_eth_reset(struct net_device *ndev)
283{ 417{
284 u32 ioaddr = ndev->base_addr; 418 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
285
286 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
287 mdelay(3); 419 mdelay(3);
288 writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); 420 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
289} 421}
290#endif 422#endif
291 423
@@ -334,13 +466,11 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
334 */ 466 */
335static void update_mac_address(struct net_device *ndev) 467static void update_mac_address(struct net_device *ndev)
336{ 468{
337 u32 ioaddr = ndev->base_addr; 469 sh_eth_write(ndev,
338 470 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
339 writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 471 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
340 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), 472 sh_eth_write(ndev,
341 ioaddr + MAHR); 473 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
342 writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
343 ioaddr + MALR);
344} 474}
345 475
346/* 476/*
@@ -353,21 +483,36 @@ static void update_mac_address(struct net_device *ndev)
353 */ 483 */
354static void read_mac_address(struct net_device *ndev, unsigned char *mac) 484static void read_mac_address(struct net_device *ndev, unsigned char *mac)
355{ 485{
356 u32 ioaddr = ndev->base_addr;
357
358 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 486 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
359 memcpy(ndev->dev_addr, mac, 6); 487 memcpy(ndev->dev_addr, mac, 6);
360 } else { 488 } else {
361 ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24); 489 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
362 ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF; 490 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
363 ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF; 491 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
364 ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF); 492 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
365 ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF; 493 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
366 ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF); 494 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
367 } 495 }
368} 496}
369 497
498static int sh_eth_is_gether(struct sh_eth_private *mdp)
499{
500 if (mdp->reg_offset == sh_eth_offset_gigabit)
501 return 1;
502 else
503 return 0;
504}
505
506static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
507{
508 if (sh_eth_is_gether(mdp))
509 return EDTRR_TRNS_GETHER;
510 else
511 return EDTRR_TRNS_ETHER;
512}
513
370struct bb_info { 514struct bb_info {
515 void (*set_gate)(unsigned long addr);
371 struct mdiobb_ctrl ctrl; 516 struct mdiobb_ctrl ctrl;
372 u32 addr; 517 u32 addr;
373 u32 mmd_msk;/* MMD */ 518 u32 mmd_msk;/* MMD */
@@ -398,6 +543,10 @@ static int bb_read(u32 addr, u32 msk)
398static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) 543static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
399{ 544{
400 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 545 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
546
547 if (bitbang->set_gate)
548 bitbang->set_gate(bitbang->addr);
549
401 if (bit) 550 if (bit)
402 bb_set(bitbang->addr, bitbang->mmd_msk); 551 bb_set(bitbang->addr, bitbang->mmd_msk);
403 else 552 else
@@ -409,6 +558,9 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
409{ 558{
410 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 559 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
411 560
561 if (bitbang->set_gate)
562 bitbang->set_gate(bitbang->addr);
563
412 if (bit) 564 if (bit)
413 bb_set(bitbang->addr, bitbang->mdo_msk); 565 bb_set(bitbang->addr, bitbang->mdo_msk);
414 else 566 else
@@ -419,6 +571,10 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
419static int sh_get_mdio(struct mdiobb_ctrl *ctrl) 571static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
420{ 572{
421 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 573 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
574
575 if (bitbang->set_gate)
576 bitbang->set_gate(bitbang->addr);
577
422 return bb_read(bitbang->addr, bitbang->mdi_msk); 578 return bb_read(bitbang->addr, bitbang->mdi_msk);
423} 579}
424 580
@@ -427,6 +583,9 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
427{ 583{
428 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 584 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
429 585
586 if (bitbang->set_gate)
587 bitbang->set_gate(bitbang->addr);
588
430 if (bit) 589 if (bit)
431 bb_set(bitbang->addr, bitbang->mdc_msk); 590 bb_set(bitbang->addr, bitbang->mdc_msk);
432 else 591 else
@@ -470,7 +629,6 @@ static void sh_eth_ring_free(struct net_device *ndev)
470/* format skb and descriptor buffer */ 629/* format skb and descriptor buffer */
471static void sh_eth_ring_format(struct net_device *ndev) 630static void sh_eth_ring_format(struct net_device *ndev)
472{ 631{
473 u32 ioaddr = ndev->base_addr;
474 struct sh_eth_private *mdp = netdev_priv(ndev); 632 struct sh_eth_private *mdp = netdev_priv(ndev);
475 int i; 633 int i;
476 struct sk_buff *skb; 634 struct sk_buff *skb;
@@ -506,10 +664,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
506 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 664 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
507 /* Rx descriptor address set */ 665 /* Rx descriptor address set */
508 if (i == 0) { 666 if (i == 0) {
509 writel(mdp->rx_desc_dma, ioaddr + RDLAR); 667 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
510#if defined(CONFIG_CPU_SUBTYPE_SH7763) 668 if (sh_eth_is_gether(mdp))
511 writel(mdp->rx_desc_dma, ioaddr + RDFAR); 669 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
512#endif
513 } 670 }
514 } 671 }
515 672
@@ -528,10 +685,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
528 txdesc->buffer_length = 0; 685 txdesc->buffer_length = 0;
529 if (i == 0) { 686 if (i == 0) {
530 /* Tx descriptor address set */ 687 /* Tx descriptor address set */
531 writel(mdp->tx_desc_dma, ioaddr + TDLAR); 688 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
532#if defined(CONFIG_CPU_SUBTYPE_SH7763) 689 if (sh_eth_is_gether(mdp))
533 writel(mdp->tx_desc_dma, ioaddr + TDFAR); 690 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
534#endif
535 } 691 }
536 } 692 }
537 693
@@ -613,7 +769,6 @@ static int sh_eth_dev_init(struct net_device *ndev)
613{ 769{
614 int ret = 0; 770 int ret = 0;
615 struct sh_eth_private *mdp = netdev_priv(ndev); 771 struct sh_eth_private *mdp = netdev_priv(ndev);
616 u32 ioaddr = ndev->base_addr;
617 u_int32_t rx_int_var, tx_int_var; 772 u_int32_t rx_int_var, tx_int_var;
618 u32 val; 773 u32 val;
619 774
@@ -623,71 +778,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
623 /* Descriptor format */ 778 /* Descriptor format */
624 sh_eth_ring_format(ndev); 779 sh_eth_ring_format(ndev);
625 if (mdp->cd->rpadir) 780 if (mdp->cd->rpadir)
626 writel(mdp->cd->rpadir_value, ioaddr + RPADIR); 781 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
627 782
628 /* all sh_eth int mask */ 783 /* all sh_eth int mask */
629 writel(0, ioaddr + EESIPR); 784 sh_eth_write(ndev, 0, EESIPR);
630 785
631#if defined(__LITTLE_ENDIAN__) 786#if defined(__LITTLE_ENDIAN__)
632 if (mdp->cd->hw_swap) 787 if (mdp->cd->hw_swap)
633 writel(EDMR_EL, ioaddr + EDMR); 788 sh_eth_write(ndev, EDMR_EL, EDMR);
634 else 789 else
635#endif 790#endif
636 writel(0, ioaddr + EDMR); 791 sh_eth_write(ndev, 0, EDMR);
637 792
638 /* FIFO size set */ 793 /* FIFO size set */
639 writel(mdp->cd->fdr_value, ioaddr + FDR); 794 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
640 writel(0, ioaddr + TFTR); 795 sh_eth_write(ndev, 0, TFTR);
641 796
642 /* Frame recv control */ 797 /* Frame recv control */
643 writel(mdp->cd->rmcr_value, ioaddr + RMCR); 798 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
644 799
645 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 800 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
646 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 801 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
647 writel(rx_int_var | tx_int_var, ioaddr + TRSCER); 802 sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
648 803
649 if (mdp->cd->bculr) 804 if (mdp->cd->bculr)
650 writel(0x800, ioaddr + BCULR); /* Burst sycle set */ 805 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
651 806
652 writel(mdp->cd->fcftr_value, ioaddr + FCFTR); 807 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
653 808
654 if (!mdp->cd->no_trimd) 809 if (!mdp->cd->no_trimd)
655 writel(0, ioaddr + TRIMD); 810 sh_eth_write(ndev, 0, TRIMD);
656 811
657 /* Recv frame limit set register */ 812 /* Recv frame limit set register */
658 writel(RFLR_VALUE, ioaddr + RFLR); 813 sh_eth_write(ndev, RFLR_VALUE, RFLR);
659 814
660 writel(readl(ioaddr + EESR), ioaddr + EESR); 815 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
661 writel(mdp->cd->eesipr_value, ioaddr + EESIPR); 816 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
662 817
663 /* PAUSE Prohibition */ 818 /* PAUSE Prohibition */
664 val = (readl(ioaddr + ECMR) & ECMR_DM) | 819 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
665 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 820 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
666 821
667 writel(val, ioaddr + ECMR); 822 sh_eth_write(ndev, val, ECMR);
668 823
669 if (mdp->cd->set_rate) 824 if (mdp->cd->set_rate)
670 mdp->cd->set_rate(ndev); 825 mdp->cd->set_rate(ndev);
671 826
672 /* E-MAC Status Register clear */ 827 /* E-MAC Status Register clear */
673 writel(mdp->cd->ecsr_value, ioaddr + ECSR); 828 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
674 829
675 /* E-MAC Interrupt Enable register */ 830 /* E-MAC Interrupt Enable register */
676 writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR); 831 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
677 832
678 /* Set MAC address */ 833 /* Set MAC address */
679 update_mac_address(ndev); 834 update_mac_address(ndev);
680 835
681 /* mask reset */ 836 /* mask reset */
682 if (mdp->cd->apr) 837 if (mdp->cd->apr)
683 writel(APR_AP, ioaddr + APR); 838 sh_eth_write(ndev, APR_AP, APR);
684 if (mdp->cd->mpr) 839 if (mdp->cd->mpr)
685 writel(MPR_MP, ioaddr + MPR); 840 sh_eth_write(ndev, MPR_MP, MPR);
686 if (mdp->cd->tpauser) 841 if (mdp->cd->tpauser)
687 writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER); 842 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
688 843
689 /* Setting the Rx mode will start the Rx process. */ 844 /* Setting the Rx mode will start the Rx process. */
690 writel(EDRRR_R, ioaddr + EDRRR); 845 sh_eth_write(ndev, EDRRR_R, EDRRR);
691 846
692 netif_start_queue(ndev); 847 netif_start_queue(ndev);
693 848
@@ -811,24 +966,37 @@ static int sh_eth_rx(struct net_device *ndev)
811 966
812 /* Restart Rx engine if stopped. */ 967 /* Restart Rx engine if stopped. */
813 /* If we don't need to check status, don't. -KDU */ 968 /* If we don't need to check status, don't. -KDU */
814 if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R)) 969 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
815 writel(EDRRR_R, ndev->base_addr + EDRRR); 970 sh_eth_write(ndev, EDRRR_R, EDRRR);
816 971
817 return 0; 972 return 0;
818} 973}
819 974
975static void sh_eth_rcv_snd_disable(struct net_device *ndev)
976{
977 /* disable tx and rx */
978 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
979 ~(ECMR_RE | ECMR_TE), ECMR);
980}
981
982static void sh_eth_rcv_snd_enable(struct net_device *ndev)
983{
984 /* enable tx and rx */
985 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
986 (ECMR_RE | ECMR_TE), ECMR);
987}
988
820/* error control function */ 989/* error control function */
821static void sh_eth_error(struct net_device *ndev, int intr_status) 990static void sh_eth_error(struct net_device *ndev, int intr_status)
822{ 991{
823 struct sh_eth_private *mdp = netdev_priv(ndev); 992 struct sh_eth_private *mdp = netdev_priv(ndev);
824 u32 ioaddr = ndev->base_addr;
825 u32 felic_stat; 993 u32 felic_stat;
826 u32 link_stat; 994 u32 link_stat;
827 u32 mask; 995 u32 mask;
828 996
829 if (intr_status & EESR_ECI) { 997 if (intr_status & EESR_ECI) {
830 felic_stat = readl(ioaddr + ECSR); 998 felic_stat = sh_eth_read(ndev, ECSR);
831 writel(felic_stat, ioaddr + ECSR); /* clear int */ 999 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
832 if (felic_stat & ECSR_ICD) 1000 if (felic_stat & ECSR_ICD)
833 mdp->stats.tx_carrier_errors++; 1001 mdp->stats.tx_carrier_errors++;
834 if (felic_stat & ECSR_LCHNG) { 1002 if (felic_stat & ECSR_LCHNG) {
@@ -839,26 +1007,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
839 else 1007 else
840 link_stat = PHY_ST_LINK; 1008 link_stat = PHY_ST_LINK;
841 } else { 1009 } else {
842 link_stat = (readl(ioaddr + PSR)); 1010 link_stat = (sh_eth_read(ndev, PSR));
843 if (mdp->ether_link_active_low) 1011 if (mdp->ether_link_active_low)
844 link_stat = ~link_stat; 1012 link_stat = ~link_stat;
845 } 1013 }
846 if (!(link_stat & PHY_ST_LINK)) { 1014 if (!(link_stat & PHY_ST_LINK))
847 /* Link Down : disable tx and rx */ 1015 sh_eth_rcv_snd_disable(ndev);
848 writel(readl(ioaddr + ECMR) & 1016 else {
849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
850 } else {
851 /* Link Up */ 1017 /* Link Up */
852 writel(readl(ioaddr + EESIPR) & 1018 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
853 ~DMAC_M_ECI, ioaddr + EESIPR); 1019 ~DMAC_M_ECI, EESIPR);
854 /*clear int */ 1020 /*clear int */
855 writel(readl(ioaddr + ECSR), 1021 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
856 ioaddr + ECSR); 1022 ECSR);
857 writel(readl(ioaddr + EESIPR) | 1023 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
858 DMAC_M_ECI, ioaddr + EESIPR); 1024 DMAC_M_ECI, EESIPR);
859 /* enable tx and rx */ 1025 /* enable tx and rx */
860 writel(readl(ioaddr + ECMR) | 1026 sh_eth_rcv_snd_enable(ndev);
861 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
862 } 1027 }
863 } 1028 }
864 } 1029 }
@@ -867,6 +1032,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
867 /* Write buck end. unused write back interrupt */ 1032 /* Write buck end. unused write back interrupt */
868 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1033 if (intr_status & EESR_TABT) /* Transmit Abort int */
869 mdp->stats.tx_aborted_errors++; 1034 mdp->stats.tx_aborted_errors++;
1035 if (netif_msg_tx_err(mdp))
1036 dev_err(&ndev->dev, "Transmit Abort\n");
870 } 1037 }
871 1038
872 if (intr_status & EESR_RABT) { 1039 if (intr_status & EESR_RABT) {
@@ -874,28 +1041,47 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
874 if (intr_status & EESR_RFRMER) { 1041 if (intr_status & EESR_RFRMER) {
875 /* Receive Frame Overflow int */ 1042 /* Receive Frame Overflow int */
876 mdp->stats.rx_frame_errors++; 1043 mdp->stats.rx_frame_errors++;
877 dev_err(&ndev->dev, "Receive Frame Overflow\n"); 1044 if (netif_msg_rx_err(mdp))
1045 dev_err(&ndev->dev, "Receive Abort\n");
878 } 1046 }
879 } 1047 }
880 1048
881 if (!mdp->cd->no_ade) { 1049 if (intr_status & EESR_TDE) {
882 if (intr_status & EESR_ADE && intr_status & EESR_TDE && 1050 /* Transmit Descriptor Empty int */
883 intr_status & EESR_TFE) 1051 mdp->stats.tx_fifo_errors++;
884 mdp->stats.tx_fifo_errors++; 1052 if (netif_msg_tx_err(mdp))
1053 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1054 }
1055
1056 if (intr_status & EESR_TFE) {
1057 /* FIFO under flow */
1058 mdp->stats.tx_fifo_errors++;
1059 if (netif_msg_tx_err(mdp))
1060 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
885 } 1061 }
886 1062
887 if (intr_status & EESR_RDE) { 1063 if (intr_status & EESR_RDE) {
888 /* Receive Descriptor Empty int */ 1064 /* Receive Descriptor Empty int */
889 mdp->stats.rx_over_errors++; 1065 mdp->stats.rx_over_errors++;
890 1066
891 if (readl(ioaddr + EDRRR) ^ EDRRR_R) 1067 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
892 writel(EDRRR_R, ioaddr + EDRRR); 1068 sh_eth_write(ndev, EDRRR_R, EDRRR);
893 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 1069 if (netif_msg_rx_err(mdp))
1070 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
894 } 1071 }
1072
895 if (intr_status & EESR_RFE) { 1073 if (intr_status & EESR_RFE) {
896 /* Receive FIFO Overflow int */ 1074 /* Receive FIFO Overflow int */
897 mdp->stats.rx_fifo_errors++; 1075 mdp->stats.rx_fifo_errors++;
898 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 1076 if (netif_msg_rx_err(mdp))
1077 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1078 }
1079
1080 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1081 /* Address Error */
1082 mdp->stats.tx_fifo_errors++;
1083 if (netif_msg_tx_err(mdp))
1084 dev_err(&ndev->dev, "Address Error\n");
899 } 1085 }
900 1086
901 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1087 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -903,7 +1089,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
903 mask &= ~EESR_ADE; 1089 mask &= ~EESR_ADE;
904 if (intr_status & mask) { 1090 if (intr_status & mask) {
905 /* Tx error */ 1091 /* Tx error */
906 u32 edtrr = readl(ndev->base_addr + EDTRR); 1092 u32 edtrr = sh_eth_read(ndev, EDTRR);
907 /* dmesg */ 1093 /* dmesg */
908 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", 1094 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
909 intr_status, mdp->cur_tx); 1095 intr_status, mdp->cur_tx);
@@ -913,9 +1099,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
913 sh_eth_txfree(ndev); 1099 sh_eth_txfree(ndev);
914 1100
915 /* SH7712 BUG */ 1101 /* SH7712 BUG */
916 if (edtrr ^ EDTRR_TRNS) { 1102 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
917 /* tx dma start */ 1103 /* tx dma start */
918 writel(EDTRR_TRNS, ndev->base_addr + EDTRR); 1104 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
919 } 1105 }
920 /* wakeup */ 1106 /* wakeup */
921 netif_wake_queue(ndev); 1107 netif_wake_queue(ndev);
@@ -928,18 +1114,17 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
928 struct sh_eth_private *mdp = netdev_priv(ndev); 1114 struct sh_eth_private *mdp = netdev_priv(ndev);
929 struct sh_eth_cpu_data *cd = mdp->cd; 1115 struct sh_eth_cpu_data *cd = mdp->cd;
930 irqreturn_t ret = IRQ_NONE; 1116 irqreturn_t ret = IRQ_NONE;
931 u32 ioaddr, intr_status = 0; 1117 u32 intr_status = 0;
932 1118
933 ioaddr = ndev->base_addr;
934 spin_lock(&mdp->lock); 1119 spin_lock(&mdp->lock);
935 1120
936 /* Get interrpt stat */ 1121 /* Get interrpt stat */
937 intr_status = readl(ioaddr + EESR); 1122 intr_status = sh_eth_read(ndev, EESR);
938 /* Clear interrupt */ 1123 /* Clear interrupt */
939 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1124 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
940 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 1125 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
941 cd->tx_check | cd->eesr_err_check)) { 1126 cd->tx_check | cd->eesr_err_check)) {
942 writel(intr_status, ioaddr + EESR); 1127 sh_eth_write(ndev, intr_status, EESR);
943 ret = IRQ_HANDLED; 1128 ret = IRQ_HANDLED;
944 } else 1129 } else
945 goto other_irq; 1130 goto other_irq;
@@ -982,7 +1167,6 @@ static void sh_eth_adjust_link(struct net_device *ndev)
982{ 1167{
983 struct sh_eth_private *mdp = netdev_priv(ndev); 1168 struct sh_eth_private *mdp = netdev_priv(ndev);
984 struct phy_device *phydev = mdp->phydev; 1169 struct phy_device *phydev = mdp->phydev;
985 u32 ioaddr = ndev->base_addr;
986 int new_state = 0; 1170 int new_state = 0;
987 1171
988 if (phydev->link != PHY_DOWN) { 1172 if (phydev->link != PHY_DOWN) {
@@ -1000,8 +1184,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1000 mdp->cd->set_rate(ndev); 1184 mdp->cd->set_rate(ndev);
1001 } 1185 }
1002 if (mdp->link == PHY_DOWN) { 1186 if (mdp->link == PHY_DOWN) {
1003 writel((readl(ioaddr + ECMR) & ~ECMR_TXF) 1187 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
1004 | ECMR_DM, ioaddr + ECMR); 1188 | ECMR_DM, ECMR);
1005 new_state = 1; 1189 new_state = 1;
1006 mdp->link = phydev->link; 1190 mdp->link = phydev->link;
1007 } 1191 }
@@ -1012,7 +1196,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1012 mdp->duplex = -1; 1196 mdp->duplex = -1;
1013 } 1197 }
1014 1198
1015 if (new_state) 1199 if (new_state && netif_msg_link(mdp))
1016 phy_print_status(phydev); 1200 phy_print_status(phydev);
1017} 1201}
1018 1202
@@ -1032,7 +1216,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
1032 1216
1033 /* Try connect to PHY */ 1217 /* Try connect to PHY */
1034 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1218 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1035 0, PHY_INTERFACE_MODE_MII); 1219 0, mdp->phy_interface);
1036 if (IS_ERR(phydev)) { 1220 if (IS_ERR(phydev)) {
1037 dev_err(&ndev->dev, "phy_connect failed\n"); 1221 dev_err(&ndev->dev, "phy_connect failed\n");
1038 return PTR_ERR(phydev); 1222 return PTR_ERR(phydev);
@@ -1063,6 +1247,131 @@ static int sh_eth_phy_start(struct net_device *ndev)
1063 return 0; 1247 return 0;
1064} 1248}
1065 1249
1250static int sh_eth_get_settings(struct net_device *ndev,
1251 struct ethtool_cmd *ecmd)
1252{
1253 struct sh_eth_private *mdp = netdev_priv(ndev);
1254 unsigned long flags;
1255 int ret;
1256
1257 spin_lock_irqsave(&mdp->lock, flags);
1258 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1259 spin_unlock_irqrestore(&mdp->lock, flags);
1260
1261 return ret;
1262}
1263
1264static int sh_eth_set_settings(struct net_device *ndev,
1265 struct ethtool_cmd *ecmd)
1266{
1267 struct sh_eth_private *mdp = netdev_priv(ndev);
1268 unsigned long flags;
1269 int ret;
1270
1271 spin_lock_irqsave(&mdp->lock, flags);
1272
1273 /* disable tx and rx */
1274 sh_eth_rcv_snd_disable(ndev);
1275
1276 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1277 if (ret)
1278 goto error_exit;
1279
1280 if (ecmd->duplex == DUPLEX_FULL)
1281 mdp->duplex = 1;
1282 else
1283 mdp->duplex = 0;
1284
1285 if (mdp->cd->set_duplex)
1286 mdp->cd->set_duplex(ndev);
1287
1288error_exit:
1289 mdelay(1);
1290
1291 /* enable tx and rx */
1292 sh_eth_rcv_snd_enable(ndev);
1293
1294 spin_unlock_irqrestore(&mdp->lock, flags);
1295
1296 return ret;
1297}
1298
1299static int sh_eth_nway_reset(struct net_device *ndev)
1300{
1301 struct sh_eth_private *mdp = netdev_priv(ndev);
1302 unsigned long flags;
1303 int ret;
1304
1305 spin_lock_irqsave(&mdp->lock, flags);
1306 ret = phy_start_aneg(mdp->phydev);
1307 spin_unlock_irqrestore(&mdp->lock, flags);
1308
1309 return ret;
1310}
1311
1312static u32 sh_eth_get_msglevel(struct net_device *ndev)
1313{
1314 struct sh_eth_private *mdp = netdev_priv(ndev);
1315 return mdp->msg_enable;
1316}
1317
1318static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1319{
1320 struct sh_eth_private *mdp = netdev_priv(ndev);
1321 mdp->msg_enable = value;
1322}
1323
1324static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1325 "rx_current", "tx_current",
1326 "rx_dirty", "tx_dirty",
1327};
1328#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1329
1330static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1331{
1332 switch (sset) {
1333 case ETH_SS_STATS:
1334 return SH_ETH_STATS_LEN;
1335 default:
1336 return -EOPNOTSUPP;
1337 }
1338}
1339
1340static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1341 struct ethtool_stats *stats, u64 *data)
1342{
1343 struct sh_eth_private *mdp = netdev_priv(ndev);
1344 int i = 0;
1345
1346 /* device-specific stats */
1347 data[i++] = mdp->cur_rx;
1348 data[i++] = mdp->cur_tx;
1349 data[i++] = mdp->dirty_rx;
1350 data[i++] = mdp->dirty_tx;
1351}
1352
1353static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1354{
1355 switch (stringset) {
1356 case ETH_SS_STATS:
1357 memcpy(data, *sh_eth_gstrings_stats,
1358 sizeof(sh_eth_gstrings_stats));
1359 break;
1360 }
1361}
1362
1363static struct ethtool_ops sh_eth_ethtool_ops = {
1364 .get_settings = sh_eth_get_settings,
1365 .set_settings = sh_eth_set_settings,
1366 .nway_reset = sh_eth_nway_reset,
1367 .get_msglevel = sh_eth_get_msglevel,
1368 .set_msglevel = sh_eth_set_msglevel,
1369 .get_link = ethtool_op_get_link,
1370 .get_strings = sh_eth_get_strings,
1371 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1372 .get_sset_count = sh_eth_get_sset_count,
1373};
1374
1066/* network device open function */ 1375/* network device open function */
1067static int sh_eth_open(struct net_device *ndev) 1376static int sh_eth_open(struct net_device *ndev)
1068{ 1377{
@@ -1073,8 +1382,8 @@ static int sh_eth_open(struct net_device *ndev)
1073 1382
1074 ret = request_irq(ndev->irq, sh_eth_interrupt, 1383 ret = request_irq(ndev->irq, sh_eth_interrupt,
1075#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1384#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1076 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1385 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1077 defined(CONFIG_CPU_SUBTYPE_SH7757) 1386 defined(CONFIG_CPU_SUBTYPE_SH7757)
1078 IRQF_SHARED, 1387 IRQF_SHARED,
1079#else 1388#else
1080 0, 1389 0,
@@ -1117,15 +1426,14 @@ out_free_irq:
1117static void sh_eth_tx_timeout(struct net_device *ndev) 1426static void sh_eth_tx_timeout(struct net_device *ndev)
1118{ 1427{
1119 struct sh_eth_private *mdp = netdev_priv(ndev); 1428 struct sh_eth_private *mdp = netdev_priv(ndev);
1120 u32 ioaddr = ndev->base_addr;
1121 struct sh_eth_rxdesc *rxdesc; 1429 struct sh_eth_rxdesc *rxdesc;
1122 int i; 1430 int i;
1123 1431
1124 netif_stop_queue(ndev); 1432 netif_stop_queue(ndev);
1125 1433
1126 /* worning message out. */ 1434 if (netif_msg_timer(mdp))
1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x," 1435 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1128 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR)); 1436 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
1129 1437
1130 /* tx_errors count up */ 1438 /* tx_errors count up */
1131 mdp->stats.tx_errors++; 1439 mdp->stats.tx_errors++;
@@ -1167,6 +1475,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1167 spin_lock_irqsave(&mdp->lock, flags); 1475 spin_lock_irqsave(&mdp->lock, flags);
1168 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1476 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
1169 if (!sh_eth_txfree(ndev)) { 1477 if (!sh_eth_txfree(ndev)) {
1478 if (netif_msg_tx_queued(mdp))
1479 dev_warn(&ndev->dev, "TxFD exhausted.\n");
1170 netif_stop_queue(ndev); 1480 netif_stop_queue(ndev);
1171 spin_unlock_irqrestore(&mdp->lock, flags); 1481 spin_unlock_irqrestore(&mdp->lock, flags);
1172 return NETDEV_TX_BUSY; 1482 return NETDEV_TX_BUSY;
@@ -1196,8 +1506,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1196 1506
1197 mdp->cur_tx++; 1507 mdp->cur_tx++;
1198 1508
1199 if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS)) 1509 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
1200 writel(EDTRR_TRNS, ndev->base_addr + EDTRR); 1510 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1201 1511
1202 return NETDEV_TX_OK; 1512 return NETDEV_TX_OK;
1203} 1513}
@@ -1206,17 +1516,16 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1206static int sh_eth_close(struct net_device *ndev) 1516static int sh_eth_close(struct net_device *ndev)
1207{ 1517{
1208 struct sh_eth_private *mdp = netdev_priv(ndev); 1518 struct sh_eth_private *mdp = netdev_priv(ndev);
1209 u32 ioaddr = ndev->base_addr;
1210 int ringsize; 1519 int ringsize;
1211 1520
1212 netif_stop_queue(ndev); 1521 netif_stop_queue(ndev);
1213 1522
1214 /* Disable interrupts by clearing the interrupt mask. */ 1523 /* Disable interrupts by clearing the interrupt mask. */
1215 writel(0x0000, ioaddr + EESIPR); 1524 sh_eth_write(ndev, 0x0000, EESIPR);
1216 1525
1217 /* Stop the chip's Tx and Rx processes. */ 1526 /* Stop the chip's Tx and Rx processes. */
1218 writel(0, ioaddr + EDTRR); 1527 sh_eth_write(ndev, 0, EDTRR);
1219 writel(0, ioaddr + EDRRR); 1528 sh_eth_write(ndev, 0, EDRRR);
1220 1529
1221 /* PHY Disconnect */ 1530 /* PHY Disconnect */
1222 if (mdp->phydev) { 1531 if (mdp->phydev) {
@@ -1247,25 +1556,24 @@ static int sh_eth_close(struct net_device *ndev)
1247static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) 1556static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1248{ 1557{
1249 struct sh_eth_private *mdp = netdev_priv(ndev); 1558 struct sh_eth_private *mdp = netdev_priv(ndev);
1250 u32 ioaddr = ndev->base_addr;
1251 1559
1252 pm_runtime_get_sync(&mdp->pdev->dev); 1560 pm_runtime_get_sync(&mdp->pdev->dev);
1253 1561
1254 mdp->stats.tx_dropped += readl(ioaddr + TROCR); 1562 mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
1255 writel(0, ioaddr + TROCR); /* (write clear) */ 1563 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
1256 mdp->stats.collisions += readl(ioaddr + CDCR); 1564 mdp->stats.collisions += sh_eth_read(ndev, CDCR);
1257 writel(0, ioaddr + CDCR); /* (write clear) */ 1565 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
1258 mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR); 1566 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
1259 writel(0, ioaddr + LCCR); /* (write clear) */ 1567 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
1260#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1568 if (sh_eth_is_gether(mdp)) {
1261 mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */ 1569 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
1262 writel(0, ioaddr + CERCR); /* (write clear) */ 1570 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
1263 mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */ 1571 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
1264 writel(0, ioaddr + CEECR); /* (write clear) */ 1572 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
1265#else 1573 } else {
1266 mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR); 1574 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
1267 writel(0, ioaddr + CNDCR); /* (write clear) */ 1575 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
1268#endif 1576 }
1269 pm_runtime_put_sync(&mdp->pdev->dev); 1577 pm_runtime_put_sync(&mdp->pdev->dev);
1270 1578
1271 return &mdp->stats; 1579 return &mdp->stats;
@@ -1291,48 +1599,46 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1291/* Multicast reception directions set */ 1599/* Multicast reception directions set */
1292static void sh_eth_set_multicast_list(struct net_device *ndev) 1600static void sh_eth_set_multicast_list(struct net_device *ndev)
1293{ 1601{
1294 u32 ioaddr = ndev->base_addr;
1295
1296 if (ndev->flags & IFF_PROMISC) { 1602 if (ndev->flags & IFF_PROMISC) {
1297 /* Set promiscuous. */ 1603 /* Set promiscuous. */
1298 writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM, 1604 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
1299 ioaddr + ECMR); 1605 ECMR_PRM, ECMR);
1300 } else { 1606 } else {
1301 /* Normal, unicast/broadcast-only mode. */ 1607 /* Normal, unicast/broadcast-only mode. */
1302 writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT, 1608 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
1303 ioaddr + ECMR); 1609 ECMR_MCT, ECMR);
1304 } 1610 }
1305} 1611}
1612#endif /* SH_ETH_HAS_TSU */
1306 1613
1307/* SuperH's TSU register init function */ 1614/* SuperH's TSU register init function */
1308static void sh_eth_tsu_init(u32 ioaddr) 1615static void sh_eth_tsu_init(struct sh_eth_private *mdp)
1309{ 1616{
1310 writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */ 1617 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
1311 writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */ 1618 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
1312 writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */ 1619 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
1313 writel(0xc, ioaddr + TSU_BSYSL0); 1620 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
1314 writel(0xc, ioaddr + TSU_BSYSL1); 1621 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
1315 writel(0, ioaddr + TSU_PRISL0); 1622 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
1316 writel(0, ioaddr + TSU_PRISL1); 1623 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
1317 writel(0, ioaddr + TSU_FWSL0); 1624 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
1318 writel(0, ioaddr + TSU_FWSL1); 1625 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
1319 writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); 1626 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
1320#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1627 if (sh_eth_is_gether(mdp)) {
1321 writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */ 1628 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
1322 writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */ 1629 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
1323#else 1630 } else {
1324 writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ 1631 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
1325 writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ 1632 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
1326#endif 1633 }
1327 writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ 1634 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
1328 writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ 1635 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
1329 writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ 1636 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
1330 writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */ 1637 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
1331 writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */ 1638 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
1332 writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */ 1639 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
1333 writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */ 1640 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
1334} 1641}
1335#endif /* SH_ETH_HAS_TSU */
1336 1642
1337/* MDIO bus release function */ 1643/* MDIO bus release function */
1338static int sh_mdio_release(struct net_device *ndev) 1644static int sh_mdio_release(struct net_device *ndev)
@@ -1355,7 +1661,8 @@ static int sh_mdio_release(struct net_device *ndev)
1355} 1661}
1356 1662
1357/* MDIO bus init function */ 1663/* MDIO bus init function */
1358static int sh_mdio_init(struct net_device *ndev, int id) 1664static int sh_mdio_init(struct net_device *ndev, int id,
1665 struct sh_eth_plat_data *pd)
1359{ 1666{
1360 int ret, i; 1667 int ret, i;
1361 struct bb_info *bitbang; 1668 struct bb_info *bitbang;
@@ -1369,7 +1676,8 @@ static int sh_mdio_init(struct net_device *ndev, int id)
1369 } 1676 }
1370 1677
1371 /* bitbang init */ 1678 /* bitbang init */
1372 bitbang->addr = ndev->base_addr + PIR; 1679 bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
1680 bitbang->set_gate = pd->set_mdio_gate;
1373 bitbang->mdi_msk = 0x08; 1681 bitbang->mdi_msk = 0x08;
1374 bitbang->mdo_msk = 0x04; 1682 bitbang->mdo_msk = 0x04;
1375 bitbang->mmd_msk = 0x02;/* MMD */ 1683 bitbang->mmd_msk = 0x02;/* MMD */
@@ -1420,6 +1728,28 @@ out:
1420 return ret; 1728 return ret;
1421} 1729}
1422 1730
1731static const u16 *sh_eth_get_register_offset(int register_type)
1732{
1733 const u16 *reg_offset = NULL;
1734
1735 switch (register_type) {
1736 case SH_ETH_REG_GIGABIT:
1737 reg_offset = sh_eth_offset_gigabit;
1738 break;
1739 case SH_ETH_REG_FAST_SH4:
1740 reg_offset = sh_eth_offset_fast_sh4;
1741 break;
1742 case SH_ETH_REG_FAST_SH3_SH2:
1743 reg_offset = sh_eth_offset_fast_sh3_sh2;
1744 break;
1745 default:
1746 printk(KERN_ERR "Unknown register type (%d)\n", register_type);
1747 break;
1748 }
1749
1750 return reg_offset;
1751}
1752
1423static const struct net_device_ops sh_eth_netdev_ops = { 1753static const struct net_device_ops sh_eth_netdev_ops = {
1424 .ndo_open = sh_eth_open, 1754 .ndo_open = sh_eth_open,
1425 .ndo_stop = sh_eth_close, 1755 .ndo_stop = sh_eth_close,
@@ -1486,19 +1816,28 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1486 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); 1816 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1487 /* get PHY ID */ 1817 /* get PHY ID */
1488 mdp->phy_id = pd->phy; 1818 mdp->phy_id = pd->phy;
1819 mdp->phy_interface = pd->phy_interface;
1489 /* EDMAC endian */ 1820 /* EDMAC endian */
1490 mdp->edmac_endian = pd->edmac_endian; 1821 mdp->edmac_endian = pd->edmac_endian;
1491 mdp->no_ether_link = pd->no_ether_link; 1822 mdp->no_ether_link = pd->no_ether_link;
1492 mdp->ether_link_active_low = pd->ether_link_active_low; 1823 mdp->ether_link_active_low = pd->ether_link_active_low;
1824 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
1493 1825
1494 /* set cpu data */ 1826 /* set cpu data */
1827#if defined(SH_ETH_HAS_BOTH_MODULES)
1828 mdp->cd = sh_eth_get_cpu_data(mdp);
1829#else
1495 mdp->cd = &sh_eth_my_cpu_data; 1830 mdp->cd = &sh_eth_my_cpu_data;
1831#endif
1496 sh_eth_set_default_cpu_data(mdp->cd); 1832 sh_eth_set_default_cpu_data(mdp->cd);
1497 1833
1498 /* set function */ 1834 /* set function */
1499 ndev->netdev_ops = &sh_eth_netdev_ops; 1835 ndev->netdev_ops = &sh_eth_netdev_ops;
1836 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
1500 ndev->watchdog_timeo = TX_TIMEOUT; 1837 ndev->watchdog_timeo = TX_TIMEOUT;
1501 1838
1839 /* debug message level */
1840 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
1502 mdp->post_rx = POST_RX >> (devno << 1); 1841 mdp->post_rx = POST_RX >> (devno << 1);
1503 mdp->post_fw = POST_FW >> (devno << 1); 1842 mdp->post_fw = POST_FW >> (devno << 1);
1504 1843
@@ -1507,13 +1846,23 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1507 1846
1508 /* First device only init */ 1847 /* First device only init */
1509 if (!devno) { 1848 if (!devno) {
1849 if (mdp->cd->tsu) {
1850 struct resource *rtsu;
1851 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1852 if (!rtsu) {
1853 dev_err(&pdev->dev, "Not found TSU resource\n");
1854 goto out_release;
1855 }
1856 mdp->tsu_addr = ioremap(rtsu->start,
1857 resource_size(rtsu));
1858 }
1510 if (mdp->cd->chip_reset) 1859 if (mdp->cd->chip_reset)
1511 mdp->cd->chip_reset(ndev); 1860 mdp->cd->chip_reset(ndev);
1512 1861
1513#if defined(SH_ETH_HAS_TSU) 1862 if (mdp->cd->tsu) {
1514 /* TSU init (Init only)*/ 1863 /* TSU init (Init only)*/
1515 sh_eth_tsu_init(SH_TSU_ADDR); 1864 sh_eth_tsu_init(mdp);
1516#endif 1865 }
1517 } 1866 }
1518 1867
1519 /* network device register */ 1868 /* network device register */
@@ -1522,7 +1871,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1522 goto out_release; 1871 goto out_release;
1523 1872
1524 /* mdio bus init */ 1873 /* mdio bus init */
1525 ret = sh_mdio_init(ndev, pdev->id); 1874 ret = sh_mdio_init(ndev, pdev->id, pd);
1526 if (ret) 1875 if (ret)
1527 goto out_unregister; 1876 goto out_unregister;
1528 1877
@@ -1539,6 +1888,8 @@ out_unregister:
1539 1888
1540out_release: 1889out_release:
1541 /* net_dev free */ 1890 /* net_dev free */
1891 if (mdp->tsu_addr)
1892 iounmap(mdp->tsu_addr);
1542 if (ndev) 1893 if (ndev)
1543 free_netdev(ndev); 1894 free_netdev(ndev);
1544 1895
@@ -1549,7 +1900,9 @@ out:
1549static int sh_eth_drv_remove(struct platform_device *pdev) 1900static int sh_eth_drv_remove(struct platform_device *pdev)
1550{ 1901{
1551 struct net_device *ndev = platform_get_drvdata(pdev); 1902 struct net_device *ndev = platform_get_drvdata(pdev);
1903 struct sh_eth_private *mdp = netdev_priv(ndev);
1552 1904
1905 iounmap(mdp->tsu_addr);
1553 sh_mdio_release(ndev); 1906 sh_mdio_release(ndev);
1554 unregister_netdev(ndev); 1907 unregister_netdev(ndev);
1555 pm_runtime_disable(&pdev->dev); 1908 pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index efa64221eede..c3048a6ba676 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -2,7 +2,7 @@
2 * SuperH Ethernet device driver 2 * SuperH Ethernet device driver
3 * 3 *
4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 * Copyright (C) 2008-2009 Renesas Solutions Corp. 5 * Copyright (C) 2008-2011 Renesas Solutions Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License, 8 * under the terms and conditions of the GNU General Public License,
@@ -38,278 +38,340 @@
38#define ETHERSMALL 60 38#define ETHERSMALL 60
39#define PKT_BUF_SZ 1538 39#define PKT_BUF_SZ 1538
40 40
41#if defined(CONFIG_CPU_SUBTYPE_SH7763) 41enum {
42/* This CPU register maps is very difference by other SH4 CPU */ 42 /* E-DMAC registers */
43 43 EDSR = 0,
44/* Chip Base Address */ 44 EDMR,
45# define SH_TSU_ADDR 0xFEE01800 45 EDTRR,
46# define ARSTR SH_TSU_ADDR 46 EDRRR,
47 47 EESR,
48/* Chip Registers */ 48 EESIPR,
49/* E-DMAC */ 49 TDLAR,
50# define EDSR 0x000 50 TDFAR,
51# define EDMR 0x400 51 TDFXR,
52# define EDTRR 0x408 52 TDFFR,
53# define EDRRR 0x410 53 RDLAR,
54# define EESR 0x428 54 RDFAR,
55# define EESIPR 0x430 55 RDFXR,
56# define TDLAR 0x010 56 RDFFR,
57# define TDFAR 0x014 57 TRSCER,
58# define TDFXR 0x018 58 RMFCR,
59# define TDFFR 0x01C 59 TFTR,
60# define RDLAR 0x030 60 FDR,
61# define RDFAR 0x034 61 RMCR,
62# define RDFXR 0x038 62 EDOCR,
63# define RDFFR 0x03C 63 TFUCR,
64# define TRSCER 0x438 64 RFOCR,
65# define RMFCR 0x440 65 FCFTR,
66# define TFTR 0x448 66 RPADIR,
67# define FDR 0x450 67 TRIMD,
68# define RMCR 0x458 68 RBWAR,
69# define RPADIR 0x460 69 TBRAR,
70# define FCFTR 0x468 70
71 71 /* Ether registers */
72/* Ether Register */ 72 ECMR,
73# define ECMR 0x500 73 ECSR,
74# define ECSR 0x510 74 ECSIPR,
75# define ECSIPR 0x518 75 PIR,
76# define PIR 0x520 76 PSR,
77# define PSR 0x528 77 RDMLR,
78# define PIPR 0x52C 78 PIPR,
79# define RFLR 0x508 79 RFLR,
80# define APR 0x554 80 IPGR,
81# define MPR 0x558 81 APR,
82# define PFTCR 0x55C 82 MPR,
83# define PFRCR 0x560 83 PFTCR,
84# define TPAUSER 0x564 84 PFRCR,
85# define GECMR 0x5B0 85 RFCR,
86# define BCULR 0x5B4 86 RFCF,
87# define MAHR 0x5C0 87 TPAUSER,
88# define MALR 0x5C8 88 TPAUSECR,
89# define TROCR 0x700 89 BCFR,
90# define CDCR 0x708 90 BCFRR,
91# define LCCR 0x710 91 GECMR,
92# define CEFCR 0x740 92 BCULR,
93# define FRECR 0x748 93 MAHR,
94# define TSFRCR 0x750 94 MALR,
95# define TLFRCR 0x758 95 TROCR,
96# define RFCR 0x760 96 CDCR,
97# define CERCR 0x768 97 LCCR,
98# define CEECR 0x770 98 CNDCR,
99# define MAFCR 0x778 99 CEFCR,
100 100 FRECR,
101/* TSU Absolute Address */ 101 TSFRCR,
102# define TSU_CTRST 0x004 102 TLFRCR,
103# define TSU_FWEN0 0x010 103 CERCR,
104# define TSU_FWEN1 0x014 104 CEECR,
105# define TSU_FCM 0x18 105 MAFCR,
106# define TSU_BSYSL0 0x20 106 RTRATE,
107# define TSU_BSYSL1 0x24 107
108# define TSU_PRISL0 0x28 108 /* TSU Absolute address */
109# define TSU_PRISL1 0x2C 109 ARSTR,
110# define TSU_FWSL0 0x30 110 TSU_CTRST,
111# define TSU_FWSL1 0x34 111 TSU_FWEN0,
112# define TSU_FWSLC 0x38 112 TSU_FWEN1,
113# define TSU_QTAG0 0x40 113 TSU_FCM,
114# define TSU_QTAG1 0x44 114 TSU_BSYSL0,
115# define TSU_FWSR 0x50 115 TSU_BSYSL1,
116# define TSU_FWINMK 0x54 116 TSU_PRISL0,
117# define TSU_ADQT0 0x48 117 TSU_PRISL1,
118# define TSU_ADQT1 0x4C 118 TSU_FWSL0,
119# define TSU_VTAG0 0x58 119 TSU_FWSL1,
120# define TSU_VTAG1 0x5C 120 TSU_FWSLC,
121# define TSU_ADSBSY 0x60 121 TSU_QTAG0,
122# define TSU_TEN 0x64 122 TSU_QTAG1,
123# define TSU_POST1 0x70 123 TSU_QTAGM0,
124# define TSU_POST2 0x74 124 TSU_QTAGM1,
125# define TSU_POST3 0x78 125 TSU_FWSR,
126# define TSU_POST4 0x7C 126 TSU_FWINMK,
127# define TSU_ADRH0 0x100 127 TSU_ADQT0,
128# define TSU_ADRL0 0x104 128 TSU_ADQT1,
129# define TSU_ADRH31 0x1F8 129 TSU_VTAG0,
130# define TSU_ADRL31 0x1FC 130 TSU_VTAG1,
131 131 TSU_ADSBSY,
132# define TXNLCR0 0x80 132 TSU_TEN,
133# define TXALCR0 0x84 133 TSU_POST1,
134# define RXNLCR0 0x88 134 TSU_POST2,
135# define RXALCR0 0x8C 135 TSU_POST3,
136# define FWNLCR0 0x90 136 TSU_POST4,
137# define FWALCR0 0x94 137 TSU_ADRH0,
138# define TXNLCR1 0xA0 138 TSU_ADRL0,
139# define TXALCR1 0xA4 139 TSU_ADRH31,
140# define RXNLCR1 0xA8 140 TSU_ADRL31,
141# define RXALCR1 0xAC 141
142# define FWNLCR1 0xB0 142 TXNLCR0,
143# define FWALCR1 0x40 143 TXALCR0,
144 144 RXNLCR0,
145#elif defined(CONFIG_CPU_SH4) /* #if defined(CONFIG_CPU_SUBTYPE_SH7763) */ 145 RXALCR0,
146/* EtherC */ 146 FWNLCR0,
147#define ECMR 0x100 147 FWALCR0,
148#define RFLR 0x108 148 TXNLCR1,
149#define ECSR 0x110 149 TXALCR1,
150#define ECSIPR 0x118 150 RXNLCR1,
151#define PIR 0x120 151 RXALCR1,
152#define PSR 0x128 152 FWNLCR1,
153#define RDMLR 0x140 153 FWALCR1,
154#define IPGR 0x150 154
155#define APR 0x154 155 /* This value must be written at last. */
156#define MPR 0x158 156 SH_ETH_MAX_REGISTER_OFFSET,
157#define TPAUSER 0x164 157};
158#define RFCF 0x160 158
159#define TPAUSECR 0x168 159static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
160#define BCFRR 0x16c 160 [EDSR] = 0x0000,
161#define MAHR 0x1c0 161 [EDMR] = 0x0400,
162#define MALR 0x1c8 162 [EDTRR] = 0x0408,
163#define TROCR 0x1d0 163 [EDRRR] = 0x0410,
164#define CDCR 0x1d4 164 [EESR] = 0x0428,
165#define LCCR 0x1d8 165 [EESIPR] = 0x0430,
166#define CNDCR 0x1dc 166 [TDLAR] = 0x0010,
167#define CEFCR 0x1e4 167 [TDFAR] = 0x0014,
168#define FRECR 0x1e8 168 [TDFXR] = 0x0018,
169#define TSFRCR 0x1ec 169 [TDFFR] = 0x001c,
170#define TLFRCR 0x1f0 170 [RDLAR] = 0x0030,
171#define RFCR 0x1f4 171 [RDFAR] = 0x0034,
172#define MAFCR 0x1f8 172 [RDFXR] = 0x0038,
173#define RTRATE 0x1fc 173 [RDFFR] = 0x003c,
174 174 [TRSCER] = 0x0438,
175/* E-DMAC */ 175 [RMFCR] = 0x0440,
176#define EDMR 0x000 176 [TFTR] = 0x0448,
177#define EDTRR 0x008 177 [FDR] = 0x0450,
178#define EDRRR 0x010 178 [RMCR] = 0x0458,
179#define TDLAR 0x018 179 [RPADIR] = 0x0460,
180#define RDLAR 0x020 180 [FCFTR] = 0x0468,
181#define EESR 0x028 181
182#define EESIPR 0x030 182 [ECMR] = 0x0500,
183#define TRSCER 0x038 183 [ECSR] = 0x0510,
184#define RMFCR 0x040 184 [ECSIPR] = 0x0518,
185#define TFTR 0x048 185 [PIR] = 0x0520,
186#define FDR 0x050 186 [PSR] = 0x0528,
187#define RMCR 0x058 187 [PIPR] = 0x052c,
188#define TFUCR 0x064 188 [RFLR] = 0x0508,
189#define RFOCR 0x068 189 [APR] = 0x0554,
190#define FCFTR 0x070 190 [MPR] = 0x0558,
191#define RPADIR 0x078 191 [PFTCR] = 0x055c,
192#define TRIMD 0x07c 192 [PFRCR] = 0x0560,
193#define RBWAR 0x0c8 193 [TPAUSER] = 0x0564,
194#define RDFAR 0x0cc 194 [GECMR] = 0x05b0,
195#define TBRAR 0x0d4 195 [BCULR] = 0x05b4,
196#define TDFAR 0x0d8 196 [MAHR] = 0x05c0,
197#else /* #elif defined(CONFIG_CPU_SH4) */ 197 [MALR] = 0x05c8,
198/* This section is SH3 or SH2 */ 198 [TROCR] = 0x0700,
199#ifndef CONFIG_CPU_SUBTYPE_SH7619 199 [CDCR] = 0x0708,
200/* Chip base address */ 200 [LCCR] = 0x0710,
201# define SH_TSU_ADDR 0xA7000804 201 [CEFCR] = 0x0740,
202# define ARSTR 0xA7000800 202 [FRECR] = 0x0748,
203#endif 203 [TSFRCR] = 0x0750,
204/* Chip Registers */ 204 [TLFRCR] = 0x0758,
205/* E-DMAC */ 205 [RFCR] = 0x0760,
206# define EDMR 0x0000 206 [CERCR] = 0x0768,
207# define EDTRR 0x0004 207 [CEECR] = 0x0770,
208# define EDRRR 0x0008 208 [MAFCR] = 0x0778,
209# define TDLAR 0x000C 209
210# define RDLAR 0x0010 210 [ARSTR] = 0x0000,
211# define EESR 0x0014 211 [TSU_CTRST] = 0x0004,
212# define EESIPR 0x0018 212 [TSU_FWEN0] = 0x0010,
213# define TRSCER 0x001C 213 [TSU_FWEN1] = 0x0014,
214# define RMFCR 0x0020 214 [TSU_FCM] = 0x0018,
215# define TFTR 0x0024 215 [TSU_BSYSL0] = 0x0020,
216# define FDR 0x0028 216 [TSU_BSYSL1] = 0x0024,
217# define RMCR 0x002C 217 [TSU_PRISL0] = 0x0028,
218# define EDOCR 0x0030 218 [TSU_PRISL1] = 0x002c,
219# define FCFTR 0x0034 219 [TSU_FWSL0] = 0x0030,
220# define RPADIR 0x0038 220 [TSU_FWSL1] = 0x0034,
221# define TRIMD 0x003C 221 [TSU_FWSLC] = 0x0038,
222# define RBWAR 0x0040 222 [TSU_QTAG0] = 0x0040,
223# define RDFAR 0x0044 223 [TSU_QTAG1] = 0x0044,
224# define TBRAR 0x004C 224 [TSU_FWSR] = 0x0050,
225# define TDFAR 0x0050 225 [TSU_FWINMK] = 0x0054,
226 226 [TSU_ADQT0] = 0x0048,
227/* Ether Register */ 227 [TSU_ADQT1] = 0x004c,
228# define ECMR 0x0160 228 [TSU_VTAG0] = 0x0058,
229# define ECSR 0x0164 229 [TSU_VTAG1] = 0x005c,
230# define ECSIPR 0x0168 230 [TSU_ADSBSY] = 0x0060,
231# define PIR 0x016C 231 [TSU_TEN] = 0x0064,
232# define MAHR 0x0170 232 [TSU_POST1] = 0x0070,
233# define MALR 0x0174 233 [TSU_POST2] = 0x0074,
234# define RFLR 0x0178 234 [TSU_POST3] = 0x0078,
235# define PSR 0x017C 235 [TSU_POST4] = 0x007c,
236# define TROCR 0x0180 236 [TSU_ADRH0] = 0x0100,
237# define CDCR 0x0184 237 [TSU_ADRL0] = 0x0104,
238# define LCCR 0x0188 238 [TSU_ADRH31] = 0x01f8,
239# define CNDCR 0x018C 239 [TSU_ADRL31] = 0x01fc,
240# define CEFCR 0x0194 240
241# define FRECR 0x0198 241 [TXNLCR0] = 0x0080,
242# define TSFRCR 0x019C 242 [TXALCR0] = 0x0084,
243# define TLFRCR 0x01A0 243 [RXNLCR0] = 0x0088,
244# define RFCR 0x01A4 244 [RXALCR0] = 0x008c,
245# define MAFCR 0x01A8 245 [FWNLCR0] = 0x0090,
246# define IPGR 0x01B4 246 [FWALCR0] = 0x0094,
247# if defined(CONFIG_CPU_SUBTYPE_SH7710) 247 [TXNLCR1] = 0x00a0,
248# define APR 0x01B8 248 [TXALCR1] = 0x00a0,
249# define MPR 0x01BC 249 [RXNLCR1] = 0x00a8,
250# define TPAUSER 0x1C4 250 [RXALCR1] = 0x00ac,
251# define BCFR 0x1CC 251 [FWNLCR1] = 0x00b0,
252# endif /* CONFIG_CPU_SH7710 */ 252 [FWALCR1] = 0x00b4,
253 253};
254/* TSU */ 254
255# define TSU_CTRST 0x004 255static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
256# define TSU_FWEN0 0x010 256 [ECMR] = 0x0100,
257# define TSU_FWEN1 0x014 257 [RFLR] = 0x0108,
258# define TSU_FCM 0x018 258 [ECSR] = 0x0110,
259# define TSU_BSYSL0 0x020 259 [ECSIPR] = 0x0118,
260# define TSU_BSYSL1 0x024 260 [PIR] = 0x0120,
261# define TSU_PRISL0 0x028 261 [PSR] = 0x0128,
262# define TSU_PRISL1 0x02C 262 [RDMLR] = 0x0140,
263# define TSU_FWSL0 0x030 263 [IPGR] = 0x0150,
264# define TSU_FWSL1 0x034 264 [APR] = 0x0154,
265# define TSU_FWSLC 0x038 265 [MPR] = 0x0158,
266# define TSU_QTAGM0 0x040 266 [TPAUSER] = 0x0164,
267# define TSU_QTAGM1 0x044 267 [RFCF] = 0x0160,
268# define TSU_ADQT0 0x048 268 [TPAUSECR] = 0x0168,
269# define TSU_ADQT1 0x04C 269 [BCFRR] = 0x016c,
270# define TSU_FWSR 0x050 270 [MAHR] = 0x01c0,
271# define TSU_FWINMK 0x054 271 [MALR] = 0x01c8,
272# define TSU_ADSBSY 0x060 272 [TROCR] = 0x01d0,
273# define TSU_TEN 0x064 273 [CDCR] = 0x01d4,
274# define TSU_POST1 0x070 274 [LCCR] = 0x01d8,
275# define TSU_POST2 0x074 275 [CNDCR] = 0x01dc,
276# define TSU_POST3 0x078 276 [CEFCR] = 0x01e4,
277# define TSU_POST4 0x07C 277 [FRECR] = 0x01e8,
278# define TXNLCR0 0x080 278 [TSFRCR] = 0x01ec,
279# define TXALCR0 0x084 279 [TLFRCR] = 0x01f0,
280# define RXNLCR0 0x088 280 [RFCR] = 0x01f4,
281# define RXALCR0 0x08C 281 [MAFCR] = 0x01f8,
282# define FWNLCR0 0x090 282 [RTRATE] = 0x01fc,
283# define FWALCR0 0x094 283
284# define TXNLCR1 0x0A0 284 [EDMR] = 0x0000,
285# define TXALCR1 0x0A4 285 [EDTRR] = 0x0008,
286# define RXNLCR1 0x0A8 286 [EDRRR] = 0x0010,
287# define RXALCR1 0x0AC 287 [TDLAR] = 0x0018,
288# define FWNLCR1 0x0B0 288 [RDLAR] = 0x0020,
289# define FWALCR1 0x0B4 289 [EESR] = 0x0028,
290 290 [EESIPR] = 0x0030,
291#define TSU_ADRH0 0x0100 291 [TRSCER] = 0x0038,
292#define TSU_ADRL0 0x0104 292 [RMFCR] = 0x0040,
293#define TSU_ADRL31 0x01FC 293 [TFTR] = 0x0048,
294 294 [FDR] = 0x0050,
295#endif /* CONFIG_CPU_SUBTYPE_SH7763 */ 295 [RMCR] = 0x0058,
296 296 [TFUCR] = 0x0064,
297/* There are avoid compile error... */ 297 [RFOCR] = 0x0068,
298#if !defined(BCULR) 298 [FCFTR] = 0x0070,
299#define BCULR 0x0fc 299 [RPADIR] = 0x0078,
300#endif 300 [TRIMD] = 0x007c,
301#if !defined(TRIMD) 301 [RBWAR] = 0x00c8,
302#define TRIMD 0x0fc 302 [RDFAR] = 0x00cc,
303#endif 303 [TBRAR] = 0x00d4,
304#if !defined(APR) 304 [TDFAR] = 0x00d8,
305#define APR 0x0fc 305};
306#endif 306
307#if !defined(MPR) 307static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
308#define MPR 0x0fc 308 [ECMR] = 0x0160,
309#endif 309 [ECSR] = 0x0164,
310#if !defined(TPAUSER) 310 [ECSIPR] = 0x0168,
311#define TPAUSER 0x0fc 311 [PIR] = 0x016c,
312#endif 312 [MAHR] = 0x0170,
313 [MALR] = 0x0174,
314 [RFLR] = 0x0178,
315 [PSR] = 0x017c,
316 [TROCR] = 0x0180,
317 [CDCR] = 0x0184,
318 [LCCR] = 0x0188,
319 [CNDCR] = 0x018c,
320 [CEFCR] = 0x0194,
321 [FRECR] = 0x0198,
322 [TSFRCR] = 0x019c,
323 [TLFRCR] = 0x01a0,
324 [RFCR] = 0x01a4,
325 [MAFCR] = 0x01a8,
326 [IPGR] = 0x01b4,
327 [APR] = 0x01b8,
328 [MPR] = 0x01bc,
329 [TPAUSER] = 0x01c4,
330 [BCFR] = 0x01cc,
331
332 [ARSTR] = 0x0000,
333 [TSU_CTRST] = 0x0004,
334 [TSU_FWEN0] = 0x0010,
335 [TSU_FWEN1] = 0x0014,
336 [TSU_FCM] = 0x0018,
337 [TSU_BSYSL0] = 0x0020,
338 [TSU_BSYSL1] = 0x0024,
339 [TSU_PRISL0] = 0x0028,
340 [TSU_PRISL1] = 0x002c,
341 [TSU_FWSL0] = 0x0030,
342 [TSU_FWSL1] = 0x0034,
343 [TSU_FWSLC] = 0x0038,
344 [TSU_QTAGM0] = 0x0040,
345 [TSU_QTAGM1] = 0x0044,
346 [TSU_ADQT0] = 0x0048,
347 [TSU_ADQT1] = 0x004c,
348 [TSU_FWSR] = 0x0050,
349 [TSU_FWINMK] = 0x0054,
350 [TSU_ADSBSY] = 0x0060,
351 [TSU_TEN] = 0x0064,
352 [TSU_POST1] = 0x0070,
353 [TSU_POST2] = 0x0074,
354 [TSU_POST3] = 0x0078,
355 [TSU_POST4] = 0x007c,
356
357 [TXNLCR0] = 0x0080,
358 [TXALCR0] = 0x0084,
359 [RXNLCR0] = 0x0088,
360 [RXALCR0] = 0x008c,
361 [FWNLCR0] = 0x0090,
362 [FWALCR0] = 0x0094,
363 [TXNLCR1] = 0x00a0,
364 [TXALCR1] = 0x00a0,
365 [RXNLCR1] = 0x00a8,
366 [RXALCR1] = 0x00ac,
367 [FWNLCR1] = 0x00b0,
368 [FWALCR1] = 0x00b4,
369
370 [TSU_ADRH0] = 0x0100,
371 [TSU_ADRL0] = 0x0104,
372 [TSU_ADRL31] = 0x01fc,
373
374};
313 375
314/* Driver's parameters */ 376/* Driver's parameters */
315#if defined(CONFIG_CPU_SH4) 377#if defined(CONFIG_CPU_SH4)
@@ -338,20 +400,14 @@ enum GECMR_BIT {
338enum DMAC_M_BIT { 400enum DMAC_M_BIT {
339 EDMR_EL = 0x40, /* Litte endian */ 401 EDMR_EL = 0x40, /* Litte endian */
340 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, 402 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
341#ifdef CONFIG_CPU_SUBTYPE_SH7763 403 EDMR_SRST_GETHER = 0x03,
342 EDMR_SRST = 0x03, 404 EDMR_SRST_ETHER = 0x01,
343#else /* CONFIG_CPU_SUBTYPE_SH7763 */
344 EDMR_SRST = 0x01,
345#endif
346}; 405};
347 406
348/* EDTRR */ 407/* EDTRR */
349enum DMAC_T_BIT { 408enum DMAC_T_BIT {
350#ifdef CONFIG_CPU_SUBTYPE_SH7763 409 EDTRR_TRNS_GETHER = 0x03,
351 EDTRR_TRNS = 0x03, 410 EDTRR_TRNS_ETHER = 0x01,
352#else
353 EDTRR_TRNS = 0x01,
354#endif
355}; 411};
356 412
357/* EDRRR*/ 413/* EDRRR*/
@@ -695,6 +751,7 @@ struct sh_eth_cpu_data {
695 unsigned mpr:1; /* EtherC have MPR */ 751 unsigned mpr:1; /* EtherC have MPR */
696 unsigned tpauser:1; /* EtherC have TPAUSER */ 752 unsigned tpauser:1; /* EtherC have TPAUSER */
697 unsigned bculr:1; /* EtherC have BCULR */ 753 unsigned bculr:1; /* EtherC have BCULR */
754 unsigned tsu:1; /* EtherC have TSU */
698 unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */ 755 unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
699 unsigned rpadir:1; /* E-DMAC have RPADIR */ 756 unsigned rpadir:1; /* E-DMAC have RPADIR */
700 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ 757 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
@@ -704,6 +761,8 @@ struct sh_eth_cpu_data {
704struct sh_eth_private { 761struct sh_eth_private {
705 struct platform_device *pdev; 762 struct platform_device *pdev;
706 struct sh_eth_cpu_data *cd; 763 struct sh_eth_cpu_data *cd;
764 const u16 *reg_offset;
765 void __iomem *tsu_addr;
707 dma_addr_t rx_desc_dma; 766 dma_addr_t rx_desc_dma;
708 dma_addr_t tx_desc_dma; 767 dma_addr_t tx_desc_dma;
709 struct sh_eth_rxdesc *rx_ring; 768 struct sh_eth_rxdesc *rx_ring;
@@ -722,6 +781,7 @@ struct sh_eth_private {
722 struct mii_bus *mii_bus; /* MDIO bus control */ 781 struct mii_bus *mii_bus; /* MDIO bus control */
723 struct phy_device *phydev; /* PHY device control */ 782 struct phy_device *phydev; /* PHY device control */
724 enum phy_state link; 783 enum phy_state link;
784 phy_interface_t phy_interface;
725 int msg_enable; 785 int msg_enable;
726 int speed; 786 int speed;
727 int duplex; 787 int duplex;
@@ -746,4 +806,32 @@ static inline void sh_eth_soft_swap(char *src, int len)
746#endif 806#endif
747} 807}
748 808
809static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
810 int enum_index)
811{
812 struct sh_eth_private *mdp = netdev_priv(ndev);
813
814 writel(data, ndev->base_addr + mdp->reg_offset[enum_index]);
815}
816
817static inline unsigned long sh_eth_read(struct net_device *ndev,
818 int enum_index)
819{
820 struct sh_eth_private *mdp = netdev_priv(ndev);
821
822 return readl(ndev->base_addr + mdp->reg_offset[enum_index]);
823}
824
825static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
826 unsigned long data, int enum_index)
827{
828 writel(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
829}
830
831static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
832 int enum_index)
833{
834 return readl(mdp->tsu_addr + mdp->reg_offset[enum_index]);
835}
836
749#endif /* #ifndef __SH_ETH_H__ */ 837#endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 640e368ebeee..84d4167eee9a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
495 sis_priv->mii_info.reg_num_mask = 0x1f; 495 sis_priv->mii_info.reg_num_mask = 0x1f;
496 496
497 /* Get Mac address according to the chip revision */ 497 /* Get Mac address according to the chip revision */
498 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev)); 498 sis_priv->chipset_rev = pci_dev->revision;
499 if(netif_msg_probe(sis_priv)) 499 if(netif_msg_probe(sis_priv))
500 printk(KERN_DEBUG "%s: detected revision %2.2x, " 500 printk(KERN_DEBUG "%s: detected revision %2.2x, "
501 "trying to get MAC address...\n", 501 "trying to get MAC address...\n",
@@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
532 /* save our host bridge revision */ 532 /* save our host bridge revision */
533 dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL); 533 dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
534 if (dev) { 534 if (dev) {
535 pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev); 535 sis_priv->host_bridge_rev = dev->revision;
536 pci_dev_put(dev); 536 pci_dev_put(dev);
537 } 537 }
538 538
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7d85a38377a1..2a91868788f7 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4983,7 +4983,7 @@ static int sky2_suspend(struct device *dev)
4983 return 0; 4983 return 0;
4984} 4984}
4985 4985
4986#ifdef CONFIG_PM 4986#ifdef CONFIG_PM_SLEEP
4987static int sky2_resume(struct device *dev) 4987static int sky2_resume(struct device *dev)
4988{ 4988{
4989 struct pci_dev *pdev = to_pci_dev(dev); 4989 struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df611ee17..43654a3bb0ec 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
81#include <linux/ethtool.h> 81#include <linux/ethtool.h>
82#include <linux/mii.h> 82#include <linux/mii.h>
83#include <linux/workqueue.h> 83#include <linux/workqueue.h>
84#include <linux/of.h>
84 85
85#include <linux/netdevice.h> 86#include <linux/netdevice.h>
86#include <linux/etherdevice.h> 87#include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
2394 return 0; 2395 return 0;
2395} 2396}
2396 2397
2398#ifdef CONFIG_OF
2399static const struct of_device_id smc91x_match[] = {
2400 { .compatible = "smsc,lan91c94", },
2401 { .compatible = "smsc,lan91c111", },
2402 {},
2403}
2404MODULE_DEVICE_TABLE(of, smc91x_match);
2405#endif
2406
2397static struct dev_pm_ops smc_drv_pm_ops = { 2407static struct dev_pm_ops smc_drv_pm_ops = {
2398 .suspend = smc_drv_suspend, 2408 .suspend = smc_drv_suspend,
2399 .resume = smc_drv_resume, 2409 .resume = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
2406 .name = CARDNAME, 2416 .name = CARDNAME,
2407 .owner = THIS_MODULE, 2417 .owner = THIS_MODULE,
2408 .pm = &smc_drv_pm_ops, 2418 .pm = &smc_drv_pm_ops,
2419#ifdef CONFIG_OF
2420 .of_match_table = smc91x_match,
2421#endif
2409 }, 2422 },
2410}; 2423};
2411 2424
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index d70bde95460b..1566259c1f27 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -791,8 +791,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
791 return -ENODEV; 791 return -ENODEV;
792 } 792 }
793 793
794 SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X", 794 SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X",
795 phy_addr, phydev->addr, phydev->phy_id); 795 phydev->addr, phydev->phy_id);
796 796
797 ret = phy_connect_direct(dev, phydev, 797 ret = phy_connect_direct(dev, phydev,
798 &smsc911x_phy_adjust_link, 0, 798 &smsc911x_phy_adjust_link, 0,
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1c5408f83937..c1a344829b54 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
320 320
321 if (txmac_stat & MAC_TXSTAT_URUN) { 321 if (txmac_stat & MAC_TXSTAT_URUN) {
322 netdev_err(dev, "TX MAC xmit underrun\n"); 322 netdev_err(dev, "TX MAC xmit underrun\n");
323 gp->net_stats.tx_fifo_errors++; 323 dev->stats.tx_fifo_errors++;
324 } 324 }
325 325
326 if (txmac_stat & MAC_TXSTAT_MPE) { 326 if (txmac_stat & MAC_TXSTAT_MPE) {
327 netdev_err(dev, "TX MAC max packet size error\n"); 327 netdev_err(dev, "TX MAC max packet size error\n");
328 gp->net_stats.tx_errors++; 328 dev->stats.tx_errors++;
329 } 329 }
330 330
331 /* The rest are all cases of one of the 16-bit TX 331 /* The rest are all cases of one of the 16-bit TX
332 * counters expiring. 332 * counters expiring.
333 */ 333 */
334 if (txmac_stat & MAC_TXSTAT_NCE) 334 if (txmac_stat & MAC_TXSTAT_NCE)
335 gp->net_stats.collisions += 0x10000; 335 dev->stats.collisions += 0x10000;
336 336
337 if (txmac_stat & MAC_TXSTAT_ECE) { 337 if (txmac_stat & MAC_TXSTAT_ECE) {
338 gp->net_stats.tx_aborted_errors += 0x10000; 338 dev->stats.tx_aborted_errors += 0x10000;
339 gp->net_stats.collisions += 0x10000; 339 dev->stats.collisions += 0x10000;
340 } 340 }
341 341
342 if (txmac_stat & MAC_TXSTAT_LCE) { 342 if (txmac_stat & MAC_TXSTAT_LCE) {
343 gp->net_stats.tx_aborted_errors += 0x10000; 343 dev->stats.tx_aborted_errors += 0x10000;
344 gp->net_stats.collisions += 0x10000; 344 dev->stats.collisions += 0x10000;
345 } 345 }
346 346
347 /* We do not keep track of MAC_TXSTAT_FCE and 347 /* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
469 u32 smac = readl(gp->regs + MAC_SMACHINE); 469 u32 smac = readl(gp->regs + MAC_SMACHINE);
470 470
471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); 471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
472 gp->net_stats.rx_over_errors++; 472 dev->stats.rx_over_errors++;
473 gp->net_stats.rx_fifo_errors++; 473 dev->stats.rx_fifo_errors++;
474 474
475 ret = gem_rxmac_reset(gp); 475 ret = gem_rxmac_reset(gp);
476 } 476 }
477 477
478 if (rxmac_stat & MAC_RXSTAT_ACE) 478 if (rxmac_stat & MAC_RXSTAT_ACE)
479 gp->net_stats.rx_frame_errors += 0x10000; 479 dev->stats.rx_frame_errors += 0x10000;
480 480
481 if (rxmac_stat & MAC_RXSTAT_CCE) 481 if (rxmac_stat & MAC_RXSTAT_CCE)
482 gp->net_stats.rx_crc_errors += 0x10000; 482 dev->stats.rx_crc_errors += 0x10000;
483 483
484 if (rxmac_stat & MAC_RXSTAT_LCE) 484 if (rxmac_stat & MAC_RXSTAT_LCE)
485 gp->net_stats.rx_length_errors += 0x10000; 485 dev->stats.rx_length_errors += 0x10000;
486 486
487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
488 * events. 488 * events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
594 if (netif_msg_rx_err(gp)) 594 if (netif_msg_rx_err(gp))
595 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 595 printk(KERN_DEBUG "%s: no buffer for rx frame\n",
596 gp->dev->name); 596 gp->dev->name);
597 gp->net_stats.rx_dropped++; 597 dev->stats.rx_dropped++;
598 } 598 }
599 599
600 if (gem_status & GREG_STAT_RXTAGERR) { 600 if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
602 if (netif_msg_rx_err(gp)) 602 if (netif_msg_rx_err(gp))
603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
604 gp->dev->name); 604 gp->dev->name);
605 gp->net_stats.rx_errors++; 605 dev->stats.rx_errors++;
606 606
607 goto do_reset; 607 goto do_reset;
608 } 608 }
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
684 break; 684 break;
685 } 685 }
686 gp->tx_skbs[entry] = NULL; 686 gp->tx_skbs[entry] = NULL;
687 gp->net_stats.tx_bytes += skb->len; 687 dev->stats.tx_bytes += skb->len;
688 688
689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
690 txd = &gp->init_block->txd[entry]; 690 txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
696 entry = NEXT_TX(entry); 696 entry = NEXT_TX(entry);
697 } 697 }
698 698
699 gp->net_stats.tx_packets++; 699 dev->stats.tx_packets++;
700 dev_kfree_skb_irq(skb); 700 dev_kfree_skb_irq(skb);
701 } 701 }
702 gp->tx_old = entry; 702 gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
738 738
739static int gem_rx(struct gem *gp, int work_to_do) 739static int gem_rx(struct gem *gp, int work_to_do)
740{ 740{
741 struct net_device *dev = gp->dev;
741 int entry, drops, work_done = 0; 742 int entry, drops, work_done = 0;
742 u32 done; 743 u32 done;
743 __sum16 csum; 744 __sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
782 783
783 len = (status & RXDCTRL_BUFSZ) >> 16; 784 len = (status & RXDCTRL_BUFSZ) >> 16;
784 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 785 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
785 gp->net_stats.rx_errors++; 786 dev->stats.rx_errors++;
786 if (len < ETH_ZLEN) 787 if (len < ETH_ZLEN)
787 gp->net_stats.rx_length_errors++; 788 dev->stats.rx_length_errors++;
788 if (len & RXDCTRL_BAD) 789 if (len & RXDCTRL_BAD)
789 gp->net_stats.rx_crc_errors++; 790 dev->stats.rx_crc_errors++;
790 791
791 /* We'll just return it to GEM. */ 792 /* We'll just return it to GEM. */
792 drop_it: 793 drop_it:
793 gp->net_stats.rx_dropped++; 794 dev->stats.rx_dropped++;
794 goto next; 795 goto next;
795 } 796 }
796 797
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
843 844
844 netif_receive_skb(skb); 845 netif_receive_skb(skb);
845 846
846 gp->net_stats.rx_packets++; 847 dev->stats.rx_packets++;
847 gp->net_stats.rx_bytes += len; 848 dev->stats.rx_bytes += len;
848 849
849 next: 850 next:
850 entry = NEXT_RX(entry); 851 entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
2472static struct net_device_stats *gem_get_stats(struct net_device *dev) 2473static struct net_device_stats *gem_get_stats(struct net_device *dev)
2473{ 2474{
2474 struct gem *gp = netdev_priv(dev); 2475 struct gem *gp = netdev_priv(dev);
2475 struct net_device_stats *stats = &gp->net_stats;
2476 2476
2477 spin_lock_irq(&gp->lock); 2477 spin_lock_irq(&gp->lock);
2478 spin_lock(&gp->tx_lock); 2478 spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2481 * so we shield against this 2481 * so we shield against this
2482 */ 2482 */
2483 if (gp->running) { 2483 if (gp->running) {
2484 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2484 dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2485 writel(0, gp->regs + MAC_FCSERR); 2485 writel(0, gp->regs + MAC_FCSERR);
2486 2486
2487 stats->rx_frame_errors += readl(gp->regs + MAC_AERR); 2487 dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
2488 writel(0, gp->regs + MAC_AERR); 2488 writel(0, gp->regs + MAC_AERR);
2489 2489
2490 stats->rx_length_errors += readl(gp->regs + MAC_LERR); 2490 dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
2491 writel(0, gp->regs + MAC_LERR); 2491 writel(0, gp->regs + MAC_LERR);
2492 2492
2493 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2493 dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2494 stats->collisions += 2494 dev->stats.collisions +=
2495 (readl(gp->regs + MAC_ECOLL) + 2495 (readl(gp->regs + MAC_ECOLL) +
2496 readl(gp->regs + MAC_LCOLL)); 2496 readl(gp->regs + MAC_LCOLL));
2497 writel(0, gp->regs + MAC_ECOLL); 2497 writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2501 spin_unlock(&gp->tx_lock); 2501 spin_unlock(&gp->tx_lock);
2502 spin_unlock_irq(&gp->lock); 2502 spin_unlock_irq(&gp->lock);
2503 2503
2504 return &gp->net_stats; 2504 return &dev->stats;
2505} 2505}
2506 2506
2507static int gem_set_mac_address(struct net_device *dev, void *addr) 2507static int gem_set_mac_address(struct net_device *dev, void *addr)
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 19905460def6..ede017872367 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -994,7 +994,6 @@ struct gem {
994 u32 status; 994 u32 status;
995 995
996 struct napi_struct napi; 996 struct napi_struct napi;
997 struct net_device_stats net_stats;
998 997
999 int tx_fifo_sz; 998 int tx_fifo_sz;
1000 int rx_fifo_sz; 999 int rx_fifo_sz;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 06c0e5033656..ebec88882c3b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2010 Broadcom Corporation. 7 * Copyright (C) 2005-2011 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
@@ -64,10 +64,10 @@
64 64
65#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
66#define TG3_MAJ_NUM 3 66#define TG3_MAJ_NUM 3
67#define TG3_MIN_NUM 116 67#define TG3_MIN_NUM 117
68#define DRV_MODULE_VERSION \ 68#define DRV_MODULE_VERSION \
69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
70#define DRV_MODULE_RELDATE "December 3, 2010" 70#define DRV_MODULE_RELDATE "January 25, 2011"
71 71
72#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -1776,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1776 tg3_phy_cl45_read(tp, MDIO_MMD_AN, 1776 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1777 TG3_CL45_D7_EEERES_STAT, &val); 1777 TG3_CL45_D7_EEERES_STAT, &val);
1778 1778
1779 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 1779 switch (val) {
1780 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) 1780 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1781 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1782 case ASIC_REV_5717:
1783 case ASIC_REV_5719:
1784 case ASIC_REV_57765:
1785 /* Enable SM_DSP clock and tx 6dB coding. */
1786 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1787 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1788 MII_TG3_AUXCTL_ACTL_TX_6DB;
1789 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1790
1791 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1792
1793 /* Turn off SM_DSP clock. */
1794 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1795 MII_TG3_AUXCTL_ACTL_TX_6DB;
1796 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1797 }
1798 /* Fallthrough */
1799 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
1781 tp->setlpicnt = 2; 1800 tp->setlpicnt = 2;
1801 }
1782 } 1802 }
1783 1803
1784 if (!tp->setlpicnt) { 1804 if (!tp->setlpicnt) {
@@ -2100,7 +2120,7 @@ out:
2100 2120
2101static void tg3_frob_aux_power(struct tg3 *tp) 2121static void tg3_frob_aux_power(struct tg3 *tp)
2102{ 2122{
2103 struct tg3 *tp_peer = tp; 2123 bool need_vaux = false;
2104 2124
2105 /* The GPIOs do something completely different on 57765. */ 2125 /* The GPIOs do something completely different on 57765. */
2106 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || 2126 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
@@ -2108,23 +2128,32 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2108 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 2128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2109 return; 2129 return;
2110 2130
2111 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2131 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || 2132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 2133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) &&
2134 tp->pdev_peer != tp->pdev) {
2114 struct net_device *dev_peer; 2135 struct net_device *dev_peer;
2115 2136
2116 dev_peer = pci_get_drvdata(tp->pdev_peer); 2137 dev_peer = pci_get_drvdata(tp->pdev_peer);
2138
2117 /* remove_one() may have been run on the peer. */ 2139 /* remove_one() may have been run on the peer. */
2118 if (!dev_peer) 2140 if (dev_peer) {
2119 tp_peer = tp; 2141 struct tg3 *tp_peer = netdev_priv(dev_peer);
2120 else 2142
2121 tp_peer = netdev_priv(dev_peer); 2143 if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
2144 return;
2145
2146 if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2147 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
2148 need_vaux = true;
2149 }
2122 } 2150 }
2123 2151
2124 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || 2152 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2125 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || 2153 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2126 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || 2154 need_vaux = true;
2127 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { 2155
2156 if (need_vaux) {
2128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 2158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2130 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2159 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
@@ -2154,10 +2183,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2154 u32 no_gpio2; 2183 u32 no_gpio2;
2155 u32 grc_local_ctrl = 0; 2184 u32 grc_local_ctrl = 0;
2156 2185
2157 if (tp_peer != tp &&
2158 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2159 return;
2160
2161 /* Workaround to prevent overdrawing Amps. */ 2186 /* Workaround to prevent overdrawing Amps. */
2162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == 2187 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2163 ASIC_REV_5714) { 2188 ASIC_REV_5714) {
@@ -2196,10 +2221,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2196 } else { 2221 } else {
2197 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 2222 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2198 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 2223 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2199 if (tp_peer != tp &&
2200 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2201 return;
2202
2203 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2224 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2204 (GRC_LCLCTRL_GPIO_OE1 | 2225 (GRC_LCLCTRL_GPIO_OE1 |
2205 GRC_LCLCTRL_GPIO_OUTPUT1), 100); 2226 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
@@ -2968,11 +2989,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2968 MII_TG3_AUXCTL_ACTL_TX_6DB; 2989 MII_TG3_AUXCTL_ACTL_TX_6DB;
2969 tg3_writephy(tp, MII_TG3_AUX_CTRL, val); 2990 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2970 2991
2971 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 2992 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && 2993 case ASIC_REV_5717:
2973 !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 2994 case ASIC_REV_57765:
2974 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, 2995 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2975 val | MII_TG3_DSP_CH34TP2_HIBW01); 2996 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2997 MII_TG3_DSP_CH34TP2_HIBW01);
2998 /* Fall through */
2999 case ASIC_REV_5719:
3000 val = MII_TG3_DSP_TAP26_ALNOKO |
3001 MII_TG3_DSP_TAP26_RMRXSTO |
3002 MII_TG3_DSP_TAP26_OPCSINPT;
3003 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3004 }
2976 3005
2977 val = 0; 3006 val = 0;
2978 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 3007 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -7801,7 +7830,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7801 TG3_CPMU_DBTMR1_LNKIDLE_2047US); 7830 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7802 7831
7803 tw32_f(TG3_CPMU_EEE_DBTMR2, 7832 tw32_f(TG3_CPMU_EEE_DBTMR2,
7804 TG3_CPMU_DBTMR1_APE_TX_2047US | 7833 TG3_CPMU_DBTMR2_APE_TX_2047US |
7805 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 7834 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7806 } 7835 }
7807 7836
@@ -8075,8 +8104,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8075 /* Program the jumbo buffer descriptor ring control 8104 /* Program the jumbo buffer descriptor ring control
8076 * blocks on those devices that have them. 8105 * blocks on those devices that have them.
8077 */ 8106 */
8078 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 8107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8079 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 8108 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8109 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8080 /* Setup replenish threshold. */ 8110 /* Setup replenish threshold. */
8081 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); 8111 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8082 8112
@@ -8163,10 +8193,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8163 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 8193 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8164 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 8194 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8165 8195
8166 /* If statement applies to 5705 and 5750 PCI devices only */ 8196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8167 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 8197 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8168 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8169 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
8170 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && 8198 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 8199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8172 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 8200 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
@@ -8194,8 +8222,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8194 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8222 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8195 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8223 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 8224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8197 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK; 8225 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8198 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B; 8226 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8227 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8228 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8229 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8230 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8199 } 8231 }
8200 tw32(TG3_RDMA_RSRVCTRL_REG, 8232 tw32(TG3_RDMA_RSRVCTRL_REG,
8201 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8233 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8317,7 +8349,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8317 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 8349 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8318 udelay(100); 8350 udelay(100);
8319 8351
8320 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { 8352 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8353 tp->irq_cnt > 1) {
8321 val = tr32(MSGINT_MODE); 8354 val = tr32(MSGINT_MODE);
8322 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; 8355 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8323 tw32(MSGINT_MODE, val); 8356 tw32(MSGINT_MODE, val);
@@ -8334,17 +8367,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8334 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 8367 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8335 WDMAC_MODE_LNGREAD_ENAB); 8368 WDMAC_MODE_LNGREAD_ENAB);
8336 8369
8337 /* If statement applies to 5705 and 5750 PCI devices only */ 8370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8338 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 8371 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8339 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8340 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8341 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 8372 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8342 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || 8373 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8343 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { 8374 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8344 /* nothing */ 8375 /* nothing */
8345 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 8376 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8346 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && 8377 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8347 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8348 val |= WDMAC_MODE_RX_ACCEL; 8378 val |= WDMAC_MODE_RX_ACCEL;
8349 } 8379 }
8350 } 8380 }
@@ -9057,7 +9087,8 @@ static void tg3_ints_init(struct tg3 *tp)
9057 9087
9058 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { 9088 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9059 u32 msi_mode = tr32(MSGINT_MODE); 9089 u32 msi_mode = tr32(MSGINT_MODE);
9060 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) 9090 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
9091 tp->irq_cnt > 1)
9061 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 9092 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9062 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 9093 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9063 } 9094 }
@@ -10452,16 +10483,53 @@ static int tg3_test_nvram(struct tg3 *tp)
10452 goto out; 10483 goto out;
10453 } 10484 }
10454 10485
10486 err = -EIO;
10487
10455 /* Bootstrap checksum at offset 0x10 */ 10488 /* Bootstrap checksum at offset 0x10 */
10456 csum = calc_crc((unsigned char *) buf, 0x10); 10489 csum = calc_crc((unsigned char *) buf, 0x10);
10457 if (csum != be32_to_cpu(buf[0x10/4])) 10490 if (csum != le32_to_cpu(buf[0x10/4]))
10458 goto out; 10491 goto out;
10459 10492
10460 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 10493 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10461 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 10494 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10462 if (csum != be32_to_cpu(buf[0xfc/4])) 10495 if (csum != le32_to_cpu(buf[0xfc/4]))
10463 goto out; 10496 goto out;
10464 10497
10498 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
10499 /* The data is in little-endian format in NVRAM.
10500 * Use the big-endian read routines to preserve
10501 * the byte order as it exists in NVRAM.
10502 */
10503 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
10504 goto out;
10505 }
10506
10507 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10508 PCI_VPD_LRDT_RO_DATA);
10509 if (i > 0) {
10510 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10511 if (j < 0)
10512 goto out;
10513
10514 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10515 goto out;
10516
10517 i += PCI_VPD_LRDT_TAG_SIZE;
10518 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10519 PCI_VPD_RO_KEYWORD_CHKSUM);
10520 if (j > 0) {
10521 u8 csum8 = 0;
10522
10523 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10524
10525 for (i = 0; i <= j; i++)
10526 csum8 += ((u8 *)buf)[i];
10527
10528 if (csum8)
10529 goto out;
10530 }
10531 }
10532
10465 err = 0; 10533 err = 0;
10466 10534
10467out: 10535out:
@@ -10833,13 +10901,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10833 if (loopback_mode == TG3_MAC_LOOPBACK) { 10901 if (loopback_mode == TG3_MAC_LOOPBACK) {
10834 /* HW errata - mac loopback fails in some cases on 5780. 10902 /* HW errata - mac loopback fails in some cases on 5780.
10835 * Normal traffic and PHY loopback are not affected by 10903 * Normal traffic and PHY loopback are not affected by
10836 * errata. 10904 * errata. Also, the MAC loopback test is deprecated for
10905 * all newer ASIC revisions.
10837 */ 10906 */
10838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 10907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10908 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
10839 return 0; 10909 return 0;
10840 10910
10841 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 10911 mac_mode = tp->mac_mode &
10842 MAC_MODE_PORT_INT_LPBACK; 10912 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10913 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
10843 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 10914 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10844 mac_mode |= MAC_MODE_LINK_POLARITY; 10915 mac_mode |= MAC_MODE_LINK_POLARITY;
10845 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 10916 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10861,7 +10932,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10861 tg3_writephy(tp, MII_BMCR, val); 10932 tg3_writephy(tp, MII_BMCR, val);
10862 udelay(40); 10933 udelay(40);
10863 10934
10864 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 10935 mac_mode = tp->mac_mode &
10936 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10865 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 10937 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10866 tg3_writephy(tp, MII_TG3_FET_PTEST, 10938 tg3_writephy(tp, MII_TG3_FET_PTEST,
10867 MII_TG3_FET_PTEST_FRC_TX_LINK | 10939 MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10889,6 +10961,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10889 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 10961 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10890 } 10962 }
10891 tw32(MAC_MODE, mac_mode); 10963 tw32(MAC_MODE, mac_mode);
10964
10965 /* Wait for link */
10966 for (i = 0; i < 100; i++) {
10967 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
10968 break;
10969 mdelay(1);
10970 }
10892 } else { 10971 } else {
10893 return -EINVAL; 10972 return -EINVAL;
10894 } 10973 }
@@ -10995,14 +11074,19 @@ out:
10995static int tg3_test_loopback(struct tg3 *tp) 11074static int tg3_test_loopback(struct tg3 *tp)
10996{ 11075{
10997 int err = 0; 11076 int err = 0;
10998 u32 cpmuctrl = 0; 11077 u32 eee_cap, cpmuctrl = 0;
10999 11078
11000 if (!netif_running(tp->dev)) 11079 if (!netif_running(tp->dev))
11001 return TG3_LOOPBACK_FAILED; 11080 return TG3_LOOPBACK_FAILED;
11002 11081
11082 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11083 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11084
11003 err = tg3_reset_hw(tp, 1); 11085 err = tg3_reset_hw(tp, 1);
11004 if (err) 11086 if (err) {
11005 return TG3_LOOPBACK_FAILED; 11087 err = TG3_LOOPBACK_FAILED;
11088 goto done;
11089 }
11006 11090
11007 /* Turn off gphy autopowerdown. */ 11091 /* Turn off gphy autopowerdown. */
11008 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11092 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11022,8 +11106,10 @@ static int tg3_test_loopback(struct tg3 *tp)
11022 udelay(10); 11106 udelay(10);
11023 } 11107 }
11024 11108
11025 if (status != CPMU_MUTEX_GNT_DRIVER) 11109 if (status != CPMU_MUTEX_GNT_DRIVER) {
11026 return TG3_LOOPBACK_FAILED; 11110 err = TG3_LOOPBACK_FAILED;
11111 goto done;
11112 }
11027 11113
11028 /* Turn off link-based power management. */ 11114 /* Turn off link-based power management. */
11029 cpmuctrl = tr32(TG3_CPMU_CTRL); 11115 cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11052,6 +11138,9 @@ static int tg3_test_loopback(struct tg3 *tp)
11052 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11138 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11053 tg3_phy_toggle_apd(tp, true); 11139 tg3_phy_toggle_apd(tp, true);
11054 11140
11141done:
11142 tp->phy_flags |= eee_cap;
11143
11055 return err; 11144 return err;
11056} 11145}
11057 11146
@@ -12407,9 +12496,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12407 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; 12496 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12408 } 12497 }
12409done: 12498done:
12410 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); 12499 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
12411 device_set_wakeup_enable(&tp->pdev->dev, 12500 device_set_wakeup_enable(&tp->pdev->dev,
12412 tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 12501 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12502 else
12503 device_set_wakeup_capable(&tp->pdev->dev, false);
12413} 12504}
12414 12505
12415static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 12506static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -12461,12 +12552,45 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12461 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 12552 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12462} 12553}
12463 12554
12555static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12556{
12557 u32 adv = ADVERTISED_Autoneg |
12558 ADVERTISED_Pause;
12559
12560 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12561 adv |= ADVERTISED_1000baseT_Half |
12562 ADVERTISED_1000baseT_Full;
12563
12564 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12565 adv |= ADVERTISED_100baseT_Half |
12566 ADVERTISED_100baseT_Full |
12567 ADVERTISED_10baseT_Half |
12568 ADVERTISED_10baseT_Full |
12569 ADVERTISED_TP;
12570 else
12571 adv |= ADVERTISED_FIBRE;
12572
12573 tp->link_config.advertising = adv;
12574 tp->link_config.speed = SPEED_INVALID;
12575 tp->link_config.duplex = DUPLEX_INVALID;
12576 tp->link_config.autoneg = AUTONEG_ENABLE;
12577 tp->link_config.active_speed = SPEED_INVALID;
12578 tp->link_config.active_duplex = DUPLEX_INVALID;
12579 tp->link_config.orig_speed = SPEED_INVALID;
12580 tp->link_config.orig_duplex = DUPLEX_INVALID;
12581 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12582}
12583
12464static int __devinit tg3_phy_probe(struct tg3 *tp) 12584static int __devinit tg3_phy_probe(struct tg3 *tp)
12465{ 12585{
12466 u32 hw_phy_id_1, hw_phy_id_2; 12586 u32 hw_phy_id_1, hw_phy_id_2;
12467 u32 hw_phy_id, hw_phy_id_masked; 12587 u32 hw_phy_id, hw_phy_id_masked;
12468 int err; 12588 int err;
12469 12589
12590 /* flow control autonegotiation is default behavior */
12591 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12592 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12593
12470 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) 12594 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12471 return tg3_phy_init(tp); 12595 return tg3_phy_init(tp);
12472 12596
@@ -12528,6 +12652,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12528 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) 12652 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12529 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 12653 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12530 12654
12655 tg3_phy_init_link_config(tp);
12656
12531 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 12657 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12532 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && 12658 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12533 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 12659 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
@@ -12583,17 +12709,6 @@ skip_phy_reset:
12583 err = tg3_init_5401phy_dsp(tp); 12709 err = tg3_init_5401phy_dsp(tp);
12584 } 12710 }
12585 12711
12586 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12587 tp->link_config.advertising =
12588 (ADVERTISED_1000baseT_Half |
12589 ADVERTISED_1000baseT_Full |
12590 ADVERTISED_Autoneg |
12591 ADVERTISED_FIBRE);
12592 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
12593 tp->link_config.advertising &=
12594 ~(ADVERTISED_1000baseT_Half |
12595 ADVERTISED_1000baseT_Full);
12596
12597 return err; 12712 return err;
12598} 12713}
12599 12714
@@ -13020,7 +13135,7 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13020 return 512; 13135 return 512;
13021} 13136}
13022 13137
13023DEFINE_PCI_DEVICE_TABLE(write_reorder_chipsets) = { 13138static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13024 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 13139 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13025 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 13140 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13026 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, 13141 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
@@ -13262,7 +13377,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13262 } 13377 }
13263 13378
13264 /* Determine TSO capabilities */ 13379 /* Determine TSO capabilities */
13265 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13381 ; /* Do nothing. HW bug. */
13382 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13266 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13383 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13267 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13384 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13313,7 +13430,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13313 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 13430 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13314 } 13431 }
13315 13432
13316 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13433 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
13434 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13317 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13435 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13318 13436
13319 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13437 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13331,42 +13449,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13331 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13449 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13332 13450
13333 tp->pcie_readrq = 4096; 13451 tp->pcie_readrq = 4096;
13334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 13452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13335 u16 word; 13453 tp->pcie_readrq = 2048;
13336
13337 pci_read_config_word(tp->pdev,
13338 tp->pcie_cap + PCI_EXP_LNKSTA,
13339 &word);
13340 switch (word & PCI_EXP_LNKSTA_CLS) {
13341 case PCI_EXP_LNKSTA_CLS_2_5GB:
13342 word &= PCI_EXP_LNKSTA_NLW;
13343 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13344 switch (word) {
13345 case 2:
13346 tp->pcie_readrq = 2048;
13347 break;
13348 case 4:
13349 tp->pcie_readrq = 1024;
13350 break;
13351 }
13352 break;
13353
13354 case PCI_EXP_LNKSTA_CLS_5_0GB:
13355 word &= PCI_EXP_LNKSTA_NLW;
13356 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13357 switch (word) {
13358 case 1:
13359 tp->pcie_readrq = 2048;
13360 break;
13361 case 2:
13362 tp->pcie_readrq = 1024;
13363 break;
13364 case 4:
13365 tp->pcie_readrq = 512;
13366 break;
13367 }
13368 }
13369 }
13370 13454
13371 pcie_set_readrq(tp->pdev, tp->pcie_readrq); 13455 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13372 13456
@@ -13405,7 +13489,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13405 * every mailbox register write to force the writes to be 13489 * every mailbox register write to force the writes to be
13406 * posted to the chip in order. 13490 * posted to the chip in order.
13407 */ 13491 */
13408 if (pci_dev_present(write_reorder_chipsets) && 13492 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13409 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 13493 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13410 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; 13494 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13411 13495
@@ -14161,7 +14245,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
14161 14245
14162#define TEST_BUFFER_SIZE 0x2000 14246#define TEST_BUFFER_SIZE 0x2000
14163 14247
14164DEFINE_PCI_DEVICE_TABLE(dma_wait_state_chipsets) = { 14248static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14165 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 14249 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14166 { }, 14250 { },
14167}; 14251};
@@ -14340,7 +14424,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14340 * now look for chipsets that are known to expose the 14424 * now look for chipsets that are known to expose the
14341 * DMA bug without failing the test. 14425 * DMA bug without failing the test.
14342 */ 14426 */
14343 if (pci_dev_present(dma_wait_state_chipsets)) { 14427 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14344 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 14428 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14345 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 14429 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14346 } else { 14430 } else {
@@ -14357,23 +14441,6 @@ out_nofree:
14357 return ret; 14441 return ret;
14358} 14442}
14359 14443
14360static void __devinit tg3_init_link_config(struct tg3 *tp)
14361{
14362 tp->link_config.advertising =
14363 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14364 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14365 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14366 ADVERTISED_Autoneg | ADVERTISED_MII);
14367 tp->link_config.speed = SPEED_INVALID;
14368 tp->link_config.duplex = DUPLEX_INVALID;
14369 tp->link_config.autoneg = AUTONEG_ENABLE;
14370 tp->link_config.active_speed = SPEED_INVALID;
14371 tp->link_config.active_duplex = DUPLEX_INVALID;
14372 tp->link_config.orig_speed = SPEED_INVALID;
14373 tp->link_config.orig_duplex = DUPLEX_INVALID;
14374 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14375}
14376
14377static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14444static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14378{ 14445{
14379 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 14446 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
@@ -14677,8 +14744,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14677 goto err_out_free_dev; 14744 goto err_out_free_dev;
14678 } 14745 }
14679 14746
14680 tg3_init_link_config(tp);
14681
14682 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 14747 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14683 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 14748 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14684 14749
@@ -14826,10 +14891,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14826 goto err_out_apeunmap; 14891 goto err_out_apeunmap;
14827 } 14892 }
14828 14893
14829 /* flow control autonegotiation is default behavior */
14830 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14831 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14832
14833 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 14894 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14834 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 14895 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14835 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 14896 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index f528243e1a4f..73884b69b749 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2007-2010 Broadcom Corporation. 7 * Copyright (C) 2007-2011 Broadcom Corporation.
8 */ 8 */
9 9
10#ifndef _T3_H 10#ifndef _T3_H
@@ -141,6 +141,7 @@
141#define CHIPREV_ID_57780_A1 0x57780001 141#define CHIPREV_ID_57780_A1 0x57780001
142#define CHIPREV_ID_5717_A0 0x05717000 142#define CHIPREV_ID_5717_A0 0x05717000
143#define CHIPREV_ID_57765_A0 0x57785000 143#define CHIPREV_ID_57765_A0 0x57785000
144#define CHIPREV_ID_5719_A0 0x05719000
144#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 145#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
145#define ASIC_REV_5700 0x07 146#define ASIC_REV_5700 0x07
146#define ASIC_REV_5701 0x00 147#define ASIC_REV_5701 0x00
@@ -1105,7 +1106,7 @@
1105#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 1106#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1106#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff 1107#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
1107#define TG3_CPMU_EEE_DBTMR2 0x000036b8 1108#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1108#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000 1109#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
1109#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff 1110#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
1110#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc 1111#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
1111#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 1112#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
@@ -1333,6 +1334,10 @@
1333 1334
1334#define TG3_RDMA_RSRVCTRL_REG 0x00004900 1335#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1335#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 1336#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1337#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
1338#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
1339#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
1340#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
1336#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 1341#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
1337#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 1342#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
1338/* 0x4904 --> 0x4910 unused */ 1343/* 0x4904 --> 0x4910 unused */
@@ -2108,6 +2113,10 @@
2108 2113
2109#define MII_TG3_DSP_TAP1 0x0001 2114#define MII_TG3_DSP_TAP1 0x0001
2110#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 2115#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
2116#define MII_TG3_DSP_TAP26 0x001a
2117#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
2118#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
2119#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
2111#define MII_TG3_DSP_AADJ1CH0 0x001f 2120#define MII_TG3_DSP_AADJ1CH0 0x001f
2112#define MII_TG3_DSP_CH34TP2 0x4022 2121#define MII_TG3_DSP_CH34TP2 0x4022
2113#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010 2122#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463cd8ecc..ace6404e2fac 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -25,150 +25,9 @@
25 * Microchip Technology, 24C01A/02A/04A Data Sheet 25 * Microchip Technology, 24C01A/02A/04A Data Sheet
26 * available in PDF format from www.microchip.com 26 * available in PDF format from www.microchip.com
27 * 27 *
28 * Change History 28 ******************************************************************************/
29 * 29
30 * Tigran Aivazian <tigran@sco.com>: TLan_PciProbe() now uses 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 * new PCI BIOS interface.
32 * Alan Cox <alan@lxorguk.ukuu.org.uk>:
33 * Fixed the out of memory
34 * handling.
35 *
36 * Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
37 *
38 * v1.1 Dec 20, 1999 - Removed linux version checking
39 * Patch from Tigran Aivazian.
40 * - v1.1 includes Alan's SMP updates.
41 * - We still have problems on SMP though,
42 * but I'm looking into that.
43 *
44 * v1.2 Jan 02, 2000 - Hopefully fixed the SMP deadlock.
45 * - Removed dependency of HZ being 100.
46 * - We now allow higher priority timers to
47 * overwrite timers like TLAN_TIMER_ACTIVITY
48 * Patch from John Cagle <john.cagle@compaq.com>.
49 * - Fixed a few compiler warnings.
50 *
51 * v1.3 Feb 04, 2000 - Fixed the remaining HZ issues.
52 * - Removed call to pci_present().
53 * - Removed SA_INTERRUPT flag from irq handler.
54 * - Added __init and __initdata to reduce resisdent
55 * code size.
56 * - Driver now uses module_init/module_exit.
57 * - Rewrote init_module and tlan_probe to
58 * share a lot more code. We now use tlan_probe
59 * with builtin and module driver.
60 * - Driver ported to new net API.
61 * - tlan.txt has been reworked to reflect current
62 * driver (almost)
63 * - Other minor stuff
64 *
65 * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
66 * network cleanup in 2.3.43pre7 (Tigran & myself)
67 * - Minor stuff.
68 *
69 * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver
70 * if no cable/link were present.
71 * - Cosmetic changes.
72 * - TODO: Port completely to new PCI/DMA API
73 * Auto-Neg fallback.
74 *
75 * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't
76 * tested it though, as the kernel support is currently
77 * broken (2.3.99p4p3).
78 * - Updated tlan.txt accordingly.
79 * - Adjusted minimum/maximum frame length.
80 * - There is now a TLAN website up at
81 * http://hp.sourceforge.net/
82 *
83 * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
84 * reports PHY information when used with Donald
85 * Beckers userspace MII diagnostics utility.
86 *
87 * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
88 * - Added link information to Auto-Neg and forced
89 * modes. When NIC operates with auto-neg the driver
90 * will report Link speed & duplex modes as well as
91 * link partner abilities. When forced link is used,
92 * the driver will report status of the established
93 * link.
94 * Please read tlan.txt for additional information.
95 * - Removed call to check_region(), and used
96 * return value of request_region() instead.
97 *
98 * v1.8a May 28, 2000 - Minor updates.
99 *
100 * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
101 * - Updated with timer fixes from Andrew Morton.
102 * - Fixed module race in TLan_Open.
103 * - Added routine to monitor PHY status.
104 * - Added activity led support for Proliant devices.
105 *
106 * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
107 * like the Compaq NetFlex3/E.
108 * - Rewrote tlan_probe to better handle multiple
109 * bus probes. Probing and device setup is now
110 * done through TLan_Probe and TLan_init_one. Actual
111 * hardware probe is done with kernel API and
112 * TLan_EisaProbe.
113 * - Adjusted debug information for probing.
114 * - Fixed bug that would cause general debug information
115 * to be printed after driver removal.
116 * - Added transmit timeout handling.
117 * - Fixed OOM return values in tlan_probe.
118 * - Fixed possible mem leak in tlan_exit
119 * (now tlan_remove_one).
120 * - Fixed timer bug in TLan_phyMonitor.
121 * - This driver version is alpha quality, please
122 * send me any bug issues you may encounter.
123 *
124 * v1.11 Aug 31, 2000 - Do not try to register irq 0 if no irq line was
125 * set for EISA cards.
126 * - Added support for NetFlex3/E with nibble-rate
127 * 10Base-T PHY. This is untestet as I haven't got
128 * one of these cards.
129 * - Fixed timer being added twice.
130 * - Disabled PhyMonitoring by default as this is
131 * work in progress. Define MONITOR to enable it.
132 * - Now we don't display link info with PHYs that
133 * doesn't support it (level1).
134 * - Incresed tx_timeout beacuse of auto-neg.
135 * - Adjusted timers for forced speeds.
136 *
137 * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
138 *
139 * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
140 * when link can't be established.
141 * - Added the bbuf option as a kernel parameter.
142 * - Fixed ioaddr probe bug.
143 * - Fixed stupid deadlock with MII interrupts.
144 * - Added support for speed/duplex selection with
145 * multiple nics.
146 * - Added partly fix for TX Channel lockup with
147 * TLAN v1.0 silicon. This needs to be investigated
148 * further.
149 *
150 * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
151 * interrupt. Thanks goes to
152 * Adam Keys <adam@ti.com>
153 * Denis Beaudoin <dbeaudoin@ti.com>
154 * for providing the patch.
155 * - Fixed auto-neg output when using multiple
156 * adapters.
157 * - Converted to use new taskq interface.
158 *
159 * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
160 *
161 * Samuel Chessman <chessman@tux.org> New Maintainer!
162 *
163 * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
164 * 10T half duplex no loopback
165 * Thanks to Gunnar Eikman
166 *
167 * Sakari Ailus <sakari.ailus@iki.fi>:
168 *
169 * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
170 *
171 *******************************************************************************/
172 31
173#include <linux/module.h> 32#include <linux/module.h>
174#include <linux/init.h> 33#include <linux/init.h>
@@ -185,13 +44,11 @@
185 44
186#include "tlan.h" 45#include "tlan.h"
187 46
188typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
189
190 47
191/* For removing EISA devices */ 48/* For removing EISA devices */
192static struct net_device *TLan_Eisa_Devices; 49static struct net_device *tlan_eisa_devices;
193 50
194static int TLanDevicesInstalled; 51static int tlan_devices_installed;
195 52
196/* Set speed, duplex and aui settings */ 53/* Set speed, duplex and aui settings */
197static int aui[MAX_TLAN_BOARDS]; 54static int aui[MAX_TLAN_BOARDS];
@@ -202,8 +59,9 @@ module_param_array(aui, int, NULL, 0);
202module_param_array(duplex, int, NULL, 0); 59module_param_array(duplex, int, NULL, 0);
203module_param_array(speed, int, NULL, 0); 60module_param_array(speed, int, NULL, 0);
204MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); 61MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
205MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); 62MODULE_PARM_DESC(duplex,
206MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)"); 63 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
64MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
207 65
208MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); 66MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
209MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); 67MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
@@ -218,139 +76,144 @@ static int debug;
218module_param(debug, int, 0); 76module_param(debug, int, 0);
219MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); 77MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
220 78
221static const char TLanSignature[] = "TLAN"; 79static const char tlan_signature[] = "TLAN";
222static const char tlan_banner[] = "ThunderLAN driver v1.15a\n"; 80static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
223static int tlan_have_pci; 81static int tlan_have_pci;
224static int tlan_have_eisa; 82static int tlan_have_eisa;
225 83
226static const char *media[] = { 84static const char * const media[] = {
227 "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ", 85 "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
228 "100baseTx-FD", "100baseT4", NULL 86 "100BaseTx-FD", "100BaseT4", NULL
229}; 87};
230 88
231static struct board { 89static struct board {
232 const char *deviceLabel; 90 const char *device_label;
233 u32 flags; 91 u32 flags;
234 u16 addrOfs; 92 u16 addr_ofs;
235} board_info[] = { 93} board_info[] = {
236 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 94 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
237 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 95 { "Compaq Netelligent 10/100 TX PCI UTP",
96 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
238 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 97 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
239 { "Compaq NetFlex-3/P", 98 { "Compaq NetFlex-3/P",
240 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 99 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
241 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 100 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
242 { "Compaq Netelligent Integrated 10/100 TX UTP", 101 { "Compaq Netelligent Integrated 10/100 TX UTP",
243 TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 102 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
244 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, 103 { "Compaq Netelligent Dual 10/100 TX PCI UTP",
245 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, 104 TLAN_ADAPTER_NONE, 0x83 },
105 { "Compaq Netelligent 10/100 TX Embedded UTP",
106 TLAN_ADAPTER_NONE, 0x83 },
246 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, 107 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
247 { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 }, 108 { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
248 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, 109 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
249 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 110 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
250 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, 111 { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
251 { "Compaq NetFlex-3/E", 112 { "Compaq NetFlex-3/E",
252 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 113 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
253 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 114 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 115 { "Compaq NetFlex-3/E",
116 TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
255}; 117};
256 118
257static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = { 119static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, 120 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, 122 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
262 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I, 124 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
263 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
264 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER, 126 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
265 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, 127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
266 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B, 128 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
267 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
268 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI, 130 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
269 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, 131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
270 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D, 132 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
271 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, 133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
272 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I, 134 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
273 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, 135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
274 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183, 136 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
275 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
276 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325, 138 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
277 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, 139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
278 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326, 140 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
279 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, 141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
280 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100, 142 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
281 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, 143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
282 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2, 144 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
283 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, 145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
284 { 0,} 146 { 0,}
285}; 147};
286MODULE_DEVICE_TABLE(pci, tlan_pci_tbl); 148MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
287 149
288static void TLan_EisaProbe( void ); 150static void tlan_eisa_probe(void);
289static void TLan_Eisa_Cleanup( void ); 151static void tlan_eisa_cleanup(void);
290static int TLan_Init( struct net_device * ); 152static int tlan_init(struct net_device *);
291static int TLan_Open( struct net_device *dev ); 153static int tlan_open(struct net_device *dev);
292static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *); 154static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
293static irqreturn_t TLan_HandleInterrupt( int, void *); 155static irqreturn_t tlan_handle_interrupt(int, void *);
294static int TLan_Close( struct net_device *); 156static int tlan_close(struct net_device *);
295static struct net_device_stats *TLan_GetStats( struct net_device *); 157static struct net_device_stats *tlan_get_stats(struct net_device *);
296static void TLan_SetMulticastList( struct net_device *); 158static void tlan_set_multicast_list(struct net_device *);
297static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 159static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
298static int TLan_probe1( struct pci_dev *pdev, long ioaddr, 160static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
299 int irq, int rev, const struct pci_device_id *ent); 161 int irq, int rev, const struct pci_device_id *ent);
300static void TLan_tx_timeout( struct net_device *dev); 162static void tlan_tx_timeout(struct net_device *dev);
301static void TLan_tx_timeout_work(struct work_struct *work); 163static void tlan_tx_timeout_work(struct work_struct *work);
302static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 164static int tlan_init_one(struct pci_dev *pdev,
303 165 const struct pci_device_id *ent);
304static u32 TLan_HandleTxEOF( struct net_device *, u16 ); 166
305static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); 167static u32 tlan_handle_tx_eof(struct net_device *, u16);
306static u32 TLan_HandleRxEOF( struct net_device *, u16 ); 168static u32 tlan_handle_stat_overflow(struct net_device *, u16);
307static u32 TLan_HandleDummy( struct net_device *, u16 ); 169static u32 tlan_handle_rx_eof(struct net_device *, u16);
308static u32 TLan_HandleTxEOC( struct net_device *, u16 ); 170static u32 tlan_handle_dummy(struct net_device *, u16);
309static u32 TLan_HandleStatusCheck( struct net_device *, u16 ); 171static u32 tlan_handle_tx_eoc(struct net_device *, u16);
310static u32 TLan_HandleRxEOC( struct net_device *, u16 ); 172static u32 tlan_handle_status_check(struct net_device *, u16);
311 173static u32 tlan_handle_rx_eoc(struct net_device *, u16);
312static void TLan_Timer( unsigned long ); 174
313 175static void tlan_timer(unsigned long);
314static void TLan_ResetLists( struct net_device * ); 176
315static void TLan_FreeLists( struct net_device * ); 177static void tlan_reset_lists(struct net_device *);
316static void TLan_PrintDio( u16 ); 178static void tlan_free_lists(struct net_device *);
317static void TLan_PrintList( TLanList *, char *, int ); 179static void tlan_print_dio(u16);
318static void TLan_ReadAndClearStats( struct net_device *, int ); 180static void tlan_print_list(struct tlan_list *, char *, int);
319static void TLan_ResetAdapter( struct net_device * ); 181static void tlan_read_and_clear_stats(struct net_device *, int);
320static void TLan_FinishReset( struct net_device * ); 182static void tlan_reset_adapter(struct net_device *);
321static void TLan_SetMac( struct net_device *, int areg, char *mac ); 183static void tlan_finish_reset(struct net_device *);
322 184static void tlan_set_mac(struct net_device *, int areg, char *mac);
323static void TLan_PhyPrint( struct net_device * ); 185
324static void TLan_PhyDetect( struct net_device * ); 186static void tlan_phy_print(struct net_device *);
325static void TLan_PhyPowerDown( struct net_device * ); 187static void tlan_phy_detect(struct net_device *);
326static void TLan_PhyPowerUp( struct net_device * ); 188static void tlan_phy_power_down(struct net_device *);
327static void TLan_PhyReset( struct net_device * ); 189static void tlan_phy_power_up(struct net_device *);
328static void TLan_PhyStartLink( struct net_device * ); 190static void tlan_phy_reset(struct net_device *);
329static void TLan_PhyFinishAutoNeg( struct net_device * ); 191static void tlan_phy_start_link(struct net_device *);
192static void tlan_phy_finish_auto_neg(struct net_device *);
330#ifdef MONITOR 193#ifdef MONITOR
331static void TLan_PhyMonitor( struct net_device * ); 194static void tlan_phy_monitor(struct net_device *);
332#endif 195#endif
333 196
334/* 197/*
335static int TLan_PhyNop( struct net_device * ); 198 static int tlan_phy_nop(struct net_device *);
336static int TLan_PhyInternalCheck( struct net_device * ); 199 static int tlan_phy_internal_check(struct net_device *);
337static int TLan_PhyInternalService( struct net_device * ); 200 static int tlan_phy_internal_service(struct net_device *);
338static int TLan_PhyDp83840aCheck( struct net_device * ); 201 static int tlan_phy_dp83840a_check(struct net_device *);
339*/ 202*/
340 203
341static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); 204static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
342static void TLan_MiiSendData( u16, u32, unsigned ); 205static void tlan_mii_send_data(u16, u32, unsigned);
343static void TLan_MiiSync( u16 ); 206static void tlan_mii_sync(u16);
344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); 207static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
345 208
346static void TLan_EeSendStart( u16 ); 209static void tlan_ee_send_start(u16);
347static int TLan_EeSendByte( u16, u8, int ); 210static int tlan_ee_send_byte(u16, u8, int);
348static void TLan_EeReceiveByte( u16, u8 *, int ); 211static void tlan_ee_receive_byte(u16, u8 *, int);
349static int TLan_EeReadByte( struct net_device *, u8, u8 * ); 212static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
350 213
351 214
352static inline void 215static inline void
353TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) 216tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
354{ 217{
355 unsigned long addr = (unsigned long)skb; 218 unsigned long addr = (unsigned long)skb;
356 tag->buffer[9].address = addr; 219 tag->buffer[9].address = addr;
@@ -358,7 +221,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
358} 221}
359 222
360static inline struct sk_buff * 223static inline struct sk_buff *
361TLan_GetSKB( const struct tlan_list_tag *tag) 224tlan_get_skb(const struct tlan_list *tag)
362{ 225{
363 unsigned long addr; 226 unsigned long addr;
364 227
@@ -367,50 +230,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
367 return (struct sk_buff *) addr; 230 return (struct sk_buff *) addr;
368} 231}
369 232
370 233static u32
371static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { 234(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
372 NULL, 235 NULL,
373 TLan_HandleTxEOF, 236 tlan_handle_tx_eof,
374 TLan_HandleStatOverflow, 237 tlan_handle_stat_overflow,
375 TLan_HandleRxEOF, 238 tlan_handle_rx_eof,
376 TLan_HandleDummy, 239 tlan_handle_dummy,
377 TLan_HandleTxEOC, 240 tlan_handle_tx_eoc,
378 TLan_HandleStatusCheck, 241 tlan_handle_status_check,
379 TLan_HandleRxEOC 242 tlan_handle_rx_eoc
380}; 243};
381 244
382static inline void 245static inline void
383TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type ) 246tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
384{ 247{
385 TLanPrivateInfo *priv = netdev_priv(dev); 248 struct tlan_priv *priv = netdev_priv(dev);
386 unsigned long flags = 0; 249 unsigned long flags = 0;
387 250
388 if (!in_irq()) 251 if (!in_irq())
389 spin_lock_irqsave(&priv->lock, flags); 252 spin_lock_irqsave(&priv->lock, flags);
390 if ( priv->timer.function != NULL && 253 if (priv->timer.function != NULL &&
391 priv->timerType != TLAN_TIMER_ACTIVITY ) { 254 priv->timer_type != TLAN_TIMER_ACTIVITY) {
392 if (!in_irq()) 255 if (!in_irq())
393 spin_unlock_irqrestore(&priv->lock, flags); 256 spin_unlock_irqrestore(&priv->lock, flags);
394 return; 257 return;
395 } 258 }
396 priv->timer.function = TLan_Timer; 259 priv->timer.function = tlan_timer;
397 if (!in_irq()) 260 if (!in_irq())
398 spin_unlock_irqrestore(&priv->lock, flags); 261 spin_unlock_irqrestore(&priv->lock, flags);
399 262
400 priv->timer.data = (unsigned long) dev; 263 priv->timer.data = (unsigned long) dev;
401 priv->timerSetAt = jiffies; 264 priv->timer_set_at = jiffies;
402 priv->timerType = type; 265 priv->timer_type = type;
403 mod_timer(&priv->timer, jiffies + ticks); 266 mod_timer(&priv->timer, jiffies + ticks);
404 267
405} /* TLan_SetTimer */ 268}
406 269
407 270
408/***************************************************************************** 271/*****************************************************************************
409****************************************************************************** 272******************************************************************************
410 273
411 ThunderLAN Driver Primary Functions 274ThunderLAN driver primary functions
412 275
413 These functions are more or less common to all Linux network drivers. 276these functions are more or less common to all linux network drivers.
414 277
415****************************************************************************** 278******************************************************************************
416*****************************************************************************/ 279*****************************************************************************/
@@ -419,56 +282,124 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
419 282
420 283
421 284
422 /*************************************************************** 285/***************************************************************
423 * tlan_remove_one 286 * tlan_remove_one
424 * 287 *
425 * Returns: 288 * Returns:
426 * Nothing 289 * Nothing
427 * Parms: 290 * Parms:
428 * None 291 * None
429 * 292 *
430 * Goes through the TLanDevices list and frees the device 293 * Goes through the TLanDevices list and frees the device
431 * structs and memory associated with each device (lists 294 * structs and memory associated with each device (lists
432 * and buffers). It also ureserves the IO port regions 295 * and buffers). It also ureserves the IO port regions
433 * associated with this device. 296 * associated with this device.
434 * 297 *
435 **************************************************************/ 298 **************************************************************/
436 299
437 300
438static void __devexit tlan_remove_one( struct pci_dev *pdev) 301static void __devexit tlan_remove_one(struct pci_dev *pdev)
439{ 302{
440 struct net_device *dev = pci_get_drvdata( pdev ); 303 struct net_device *dev = pci_get_drvdata(pdev);
441 TLanPrivateInfo *priv = netdev_priv(dev); 304 struct tlan_priv *priv = netdev_priv(dev);
442 305
443 unregister_netdev( dev ); 306 unregister_netdev(dev);
444 307
445 if ( priv->dmaStorage ) { 308 if (priv->dma_storage) {
446 pci_free_consistent(priv->pciDev, 309 pci_free_consistent(priv->pci_dev,
447 priv->dmaSize, priv->dmaStorage, 310 priv->dma_size, priv->dma_storage,
448 priv->dmaStorageDMA ); 311 priv->dma_storage_dma);
449 } 312 }
450 313
451#ifdef CONFIG_PCI 314#ifdef CONFIG_PCI
452 pci_release_regions(pdev); 315 pci_release_regions(pdev);
453#endif 316#endif
454 317
455 free_netdev( dev ); 318 free_netdev(dev);
456 319
457 pci_set_drvdata( pdev, NULL ); 320 pci_set_drvdata(pdev, NULL);
458} 321}
459 322
323static void tlan_start(struct net_device *dev)
324{
325 tlan_reset_lists(dev);
326 /* NOTE: It might not be necessary to read the stats before a
327 reset if you don't care what the values are.
328 */
329 tlan_read_and_clear_stats(dev, TLAN_IGNORE);
330 tlan_reset_adapter(dev);
331 netif_wake_queue(dev);
332}
333
334static void tlan_stop(struct net_device *dev)
335{
336 struct tlan_priv *priv = netdev_priv(dev);
337
338 tlan_read_and_clear_stats(dev, TLAN_RECORD);
339 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
340 /* Reset and power down phy */
341 tlan_reset_adapter(dev);
342 if (priv->timer.function != NULL) {
343 del_timer_sync(&priv->timer);
344 priv->timer.function = NULL;
345 }
346}
347
348#ifdef CONFIG_PM
349
350static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
351{
352 struct net_device *dev = pci_get_drvdata(pdev);
353
354 if (netif_running(dev))
355 tlan_stop(dev);
356
357 netif_device_detach(dev);
358 pci_save_state(pdev);
359 pci_disable_device(pdev);
360 pci_wake_from_d3(pdev, false);
361 pci_set_power_state(pdev, PCI_D3hot);
362
363 return 0;
364}
365
366static int tlan_resume(struct pci_dev *pdev)
367{
368 struct net_device *dev = pci_get_drvdata(pdev);
369
370 pci_set_power_state(pdev, PCI_D0);
371 pci_restore_state(pdev);
372 pci_enable_wake(pdev, 0, 0);
373 netif_device_attach(dev);
374
375 if (netif_running(dev))
376 tlan_start(dev);
377
378 return 0;
379}
380
381#else /* CONFIG_PM */
382
383#define tlan_suspend NULL
384#define tlan_resume NULL
385
386#endif /* CONFIG_PM */
387
388
460static struct pci_driver tlan_driver = { 389static struct pci_driver tlan_driver = {
461 .name = "tlan", 390 .name = "tlan",
462 .id_table = tlan_pci_tbl, 391 .id_table = tlan_pci_tbl,
463 .probe = tlan_init_one, 392 .probe = tlan_init_one,
464 .remove = __devexit_p(tlan_remove_one), 393 .remove = __devexit_p(tlan_remove_one),
394 .suspend = tlan_suspend,
395 .resume = tlan_resume,
465}; 396};
466 397
467static int __init tlan_probe(void) 398static int __init tlan_probe(void)
468{ 399{
469 int rc = -ENODEV; 400 int rc = -ENODEV;
470 401
471 printk(KERN_INFO "%s", tlan_banner); 402 pr_info("%s", tlan_banner);
472 403
473 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); 404 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
474 405
@@ -477,18 +408,18 @@ static int __init tlan_probe(void)
477 rc = pci_register_driver(&tlan_driver); 408 rc = pci_register_driver(&tlan_driver);
478 409
479 if (rc != 0) { 410 if (rc != 0) {
480 printk(KERN_ERR "TLAN: Could not register pci driver.\n"); 411 pr_err("Could not register pci driver\n");
481 goto err_out_pci_free; 412 goto err_out_pci_free;
482 } 413 }
483 414
484 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); 415 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
485 TLan_EisaProbe(); 416 tlan_eisa_probe();
486 417
487 printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n", 418 pr_info("%d device%s installed, PCI: %d EISA: %d\n",
488 TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s", 419 tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
489 tlan_have_pci, tlan_have_eisa); 420 tlan_have_pci, tlan_have_eisa);
490 421
491 if (TLanDevicesInstalled == 0) { 422 if (tlan_devices_installed == 0) {
492 rc = -ENODEV; 423 rc = -ENODEV;
493 goto err_out_pci_unreg; 424 goto err_out_pci_unreg;
494 } 425 }
@@ -501,39 +432,39 @@ err_out_pci_free:
501} 432}
502 433
503 434
504static int __devinit tlan_init_one( struct pci_dev *pdev, 435static int __devinit tlan_init_one(struct pci_dev *pdev,
505 const struct pci_device_id *ent) 436 const struct pci_device_id *ent)
506{ 437{
507 return TLan_probe1( pdev, -1, -1, 0, ent); 438 return tlan_probe1(pdev, -1, -1, 0, ent);
508} 439}
509 440
510 441
511/* 442/*
512 *************************************************************** 443***************************************************************
513 * tlan_probe1 444* tlan_probe1
514 * 445*
515 * Returns: 446* Returns:
516 * 0 on success, error code on error 447* 0 on success, error code on error
517 * Parms: 448* Parms:
518 * none 449* none
519 * 450*
520 * The name is lower case to fit in with all the rest of 451* The name is lower case to fit in with all the rest of
521 * the netcard_probe names. This function looks for 452* the netcard_probe names. This function looks for
522 * another TLan based adapter, setting it up with the 453* another TLan based adapter, setting it up with the
523 * allocated device struct if one is found. 454* allocated device struct if one is found.
524 * tlan_probe has been ported to the new net API and 455* tlan_probe has been ported to the new net API and
525 * now allocates its own device structure. This function 456* now allocates its own device structure. This function
526 * is also used by modules. 457* is also used by modules.
527 * 458*
528 **************************************************************/ 459**************************************************************/
529 460
530static int __devinit TLan_probe1(struct pci_dev *pdev, 461static int __devinit tlan_probe1(struct pci_dev *pdev,
531 long ioaddr, int irq, int rev, 462 long ioaddr, int irq, int rev,
532 const struct pci_device_id *ent ) 463 const struct pci_device_id *ent)
533{ 464{
534 465
535 struct net_device *dev; 466 struct net_device *dev;
536 TLanPrivateInfo *priv; 467 struct tlan_priv *priv;
537 u16 device_id; 468 u16 device_id;
538 int reg, rc = -ENODEV; 469 int reg, rc = -ENODEV;
539 470
@@ -543,17 +474,17 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
543 if (rc) 474 if (rc)
544 return rc; 475 return rc;
545 476
546 rc = pci_request_regions(pdev, TLanSignature); 477 rc = pci_request_regions(pdev, tlan_signature);
547 if (rc) { 478 if (rc) {
548 printk(KERN_ERR "TLAN: Could not reserve IO regions\n"); 479 pr_err("Could not reserve IO regions\n");
549 goto err_out; 480 goto err_out;
550 } 481 }
551 } 482 }
552#endif /* CONFIG_PCI */ 483#endif /* CONFIG_PCI */
553 484
554 dev = alloc_etherdev(sizeof(TLanPrivateInfo)); 485 dev = alloc_etherdev(sizeof(struct tlan_priv));
555 if (dev == NULL) { 486 if (dev == NULL) {
556 printk(KERN_ERR "TLAN: Could not allocate memory for device.\n"); 487 pr_err("Could not allocate memory for device\n");
557 rc = -ENOMEM; 488 rc = -ENOMEM;
558 goto err_out_regions; 489 goto err_out_regions;
559 } 490 }
@@ -561,38 +492,39 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
561 492
562 priv = netdev_priv(dev); 493 priv = netdev_priv(dev);
563 494
564 priv->pciDev = pdev; 495 priv->pci_dev = pdev;
565 priv->dev = dev; 496 priv->dev = dev;
566 497
567 /* Is this a PCI device? */ 498 /* Is this a PCI device? */
568 if (pdev) { 499 if (pdev) {
569 u32 pci_io_base = 0; 500 u32 pci_io_base = 0;
570 501
571 priv->adapter = &board_info[ent->driver_data]; 502 priv->adapter = &board_info[ent->driver_data];
572 503
573 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 504 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
574 if (rc) { 505 if (rc) {
575 printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n"); 506 pr_err("No suitable PCI mapping available\n");
576 goto err_out_free_dev; 507 goto err_out_free_dev;
577 } 508 }
578 509
579 for ( reg= 0; reg <= 5; reg ++ ) { 510 for (reg = 0; reg <= 5; reg++) {
580 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { 511 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
581 pci_io_base = pci_resource_start(pdev, reg); 512 pci_io_base = pci_resource_start(pdev, reg);
582 TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n", 513 TLAN_DBG(TLAN_DEBUG_GNRL,
583 pci_io_base); 514 "IO mapping is available at %x.\n",
515 pci_io_base);
584 break; 516 break;
585 } 517 }
586 } 518 }
587 if (!pci_io_base) { 519 if (!pci_io_base) {
588 printk(KERN_ERR "TLAN: No IO mappings available\n"); 520 pr_err("No IO mappings available\n");
589 rc = -EIO; 521 rc = -EIO;
590 goto err_out_free_dev; 522 goto err_out_free_dev;
591 } 523 }
592 524
593 dev->base_addr = pci_io_base; 525 dev->base_addr = pci_io_base;
594 dev->irq = pdev->irq; 526 dev->irq = pdev->irq;
595 priv->adapterRev = pdev->revision; 527 priv->adapter_rev = pdev->revision;
596 pci_set_master(pdev); 528 pci_set_master(pdev);
597 pci_set_drvdata(pdev, dev); 529 pci_set_drvdata(pdev, dev);
598 530
@@ -602,11 +534,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
602 device_id = inw(ioaddr + EISA_ID2); 534 device_id = inw(ioaddr + EISA_ID2);
603 priv->is_eisa = 1; 535 priv->is_eisa = 1;
604 if (device_id == 0x20F1) { 536 if (device_id == 0x20F1) {
605 priv->adapter = &board_info[13]; /* NetFlex-3/E */ 537 priv->adapter = &board_info[13]; /* NetFlex-3/E */
606 priv->adapterRev = 23; /* TLAN 2.3 */ 538 priv->adapter_rev = 23; /* TLAN 2.3 */
607 } else { 539 } else {
608 priv->adapter = &board_info[14]; 540 priv->adapter = &board_info[14];
609 priv->adapterRev = 10; /* TLAN 1.0 */ 541 priv->adapter_rev = 10; /* TLAN 1.0 */
610 } 542 }
611 dev->base_addr = ioaddr; 543 dev->base_addr = ioaddr;
612 dev->irq = irq; 544 dev->irq = irq;
@@ -620,11 +552,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
620 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 552 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
621 : (dev->mem_start & 0x18) >> 3; 553 : (dev->mem_start & 0x18) >> 3;
622 554
623 if (priv->speed == 0x1) { 555 if (priv->speed == 0x1)
624 priv->speed = TLAN_SPEED_10; 556 priv->speed = TLAN_SPEED_10;
625 } else if (priv->speed == 0x2) { 557 else if (priv->speed == 0x2)
626 priv->speed = TLAN_SPEED_100; 558 priv->speed = TLAN_SPEED_100;
627 } 559
628 debug = priv->debug = dev->mem_end; 560 debug = priv->debug = dev->mem_end;
629 } else { 561 } else {
630 priv->aui = aui[boards_found]; 562 priv->aui = aui[boards_found];
@@ -635,46 +567,45 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
635 567
636 /* This will be used when we get an adapter error from 568 /* This will be used when we get an adapter error from
637 * within our irq handler */ 569 * within our irq handler */
638 INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); 570 INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
639 571
640 spin_lock_init(&priv->lock); 572 spin_lock_init(&priv->lock);
641 573
642 rc = TLan_Init(dev); 574 rc = tlan_init(dev);
643 if (rc) { 575 if (rc) {
644 printk(KERN_ERR "TLAN: Could not set up device.\n"); 576 pr_err("Could not set up device\n");
645 goto err_out_free_dev; 577 goto err_out_free_dev;
646 } 578 }
647 579
648 rc = register_netdev(dev); 580 rc = register_netdev(dev);
649 if (rc) { 581 if (rc) {
650 printk(KERN_ERR "TLAN: Could not register device.\n"); 582 pr_err("Could not register device\n");
651 goto err_out_uninit; 583 goto err_out_uninit;
652 } 584 }
653 585
654 586
655 TLanDevicesInstalled++; 587 tlan_devices_installed++;
656 boards_found++; 588 boards_found++;
657 589
658 /* pdev is NULL if this is an EISA device */ 590 /* pdev is NULL if this is an EISA device */
659 if (pdev) 591 if (pdev)
660 tlan_have_pci++; 592 tlan_have_pci++;
661 else { 593 else {
662 priv->nextDevice = TLan_Eisa_Devices; 594 priv->next_device = tlan_eisa_devices;
663 TLan_Eisa_Devices = dev; 595 tlan_eisa_devices = dev;
664 tlan_have_eisa++; 596 tlan_have_eisa++;
665 } 597 }
666 598
667 printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n", 599 netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
668 dev->name, 600 (int)dev->irq,
669 (int) dev->irq, 601 (int)dev->base_addr,
670 (int) dev->base_addr, 602 priv->adapter->device_label,
671 priv->adapter->deviceLabel, 603 priv->adapter_rev);
672 priv->adapterRev);
673 return 0; 604 return 0;
674 605
675err_out_uninit: 606err_out_uninit:
676 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, 607 pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
677 priv->dmaStorageDMA ); 608 priv->dma_storage_dma);
678err_out_free_dev: 609err_out_free_dev:
679 free_netdev(dev); 610 free_netdev(dev);
680err_out_regions: 611err_out_regions:
@@ -689,22 +620,23 @@ err_out:
689} 620}
690 621
691 622
/* tlan_eisa_cleanup
 *	Module-exit teardown for EISA adapters (PCI ones are handled by
 *	pci_unregister_driver): walk the singly-linked tlan_eisa_devices
 *	list, freeing DMA storage, I/O region and netdev for each entry.
 */
static void tlan_eisa_cleanup(void)
{
	struct net_device *dev;
	struct tlan_priv *priv;

	while (tlan_have_eisa) {
		dev = tlan_eisa_devices;
		priv = netdev_priv(dev);
		if (priv->dma_storage) {
			pci_free_consistent(priv->pci_dev, priv->dma_size,
					    priv->dma_storage,
					    priv->dma_storage_dma);
		}
		/* 0x10 matches the region size requested at probe time */
		release_region(dev->base_addr, 0x10);
		unregister_netdev(dev);
		/* advance the list head before freeing the netdev that
		 * owns priv */
		tlan_eisa_devices = priv->next_device;
		free_netdev(dev);
		tlan_have_eisa--;
	}
}
@@ -715,7 +647,7 @@ static void __exit tlan_exit(void)
715 pci_unregister_driver(&tlan_driver); 647 pci_unregister_driver(&tlan_driver);
716 648
717 if (tlan_have_eisa) 649 if (tlan_have_eisa)
718 TLan_Eisa_Cleanup(); 650 tlan_eisa_cleanup();
719 651
720} 652}
721 653
@@ -726,24 +658,24 @@ module_exit(tlan_exit);
726 658
727 659
728 660
729 /************************************************************** 661/**************************************************************
730 * TLan_EisaProbe 662 * tlan_eisa_probe
731 * 663 *
732 * Returns: 0 on success, 1 otherwise 664 * Returns: 0 on success, 1 otherwise
733 * 665 *
734 * Parms: None 666 * Parms: None
735 * 667 *
736 * 668 *
737 * This functions probes for EISA devices and calls 669 * This functions probes for EISA devices and calls
738 * TLan_probe1 when one is found. 670 * TLan_probe1 when one is found.
739 * 671 *
740 *************************************************************/ 672 *************************************************************/
741 673
742static void __init TLan_EisaProbe (void) 674static void __init tlan_eisa_probe(void)
743{ 675{
744 long ioaddr; 676 long ioaddr;
745 int rc = -ENODEV; 677 int rc = -ENODEV;
746 int irq; 678 int irq;
747 u16 device_id; 679 u16 device_id;
748 680
749 if (!EISA_bus) { 681 if (!EISA_bus) {
@@ -754,15 +686,16 @@ static void __init TLan_EisaProbe (void)
754 /* Loop through all slots of the EISA bus */ 686 /* Loop through all slots of the EISA bus */
755 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { 687 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
756 688
757 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 689 TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
758 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 690 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
759 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 691 TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
760 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 692 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
761 693
762 694
763 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", 695 TLAN_DBG(TLAN_DEBUG_PROBE,
764 (int) ioaddr); 696 "Probing for EISA adapter at IO: 0x%4x : ",
765 if (request_region(ioaddr, 0x10, TLanSignature) == NULL) 697 (int) ioaddr);
698 if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
766 goto out; 699 goto out;
767 700
768 if (inw(ioaddr + EISA_ID) != 0x110E) { 701 if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +705,324 @@ static void __init TLan_EisaProbe (void)
772 705
773 device_id = inw(ioaddr + EISA_ID2); 706 device_id = inw(ioaddr + EISA_ID2);
774 if (device_id != 0x20F1 && device_id != 0x40F1) { 707 if (device_id != 0x20F1 && device_id != 0x40F1) {
775 release_region (ioaddr, 0x10); 708 release_region(ioaddr, 0x10);
776 goto out; 709 goto out;
777 } 710 }
778 711
779 if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */ 712 /* check if adapter is enabled */
780 release_region (ioaddr, 0x10); 713 if (inb(ioaddr + EISA_CR) != 0x1) {
714 release_region(ioaddr, 0x10);
781 goto out2; 715 goto out2;
782 } 716 }
783 717
784 if (debug == 0x10) 718 if (debug == 0x10)
785 printk("Found one\n"); 719 pr_info("Found one\n");
786 720
787 721
788 /* Get irq from board */ 722 /* Get irq from board */
789 switch (inb(ioaddr + 0xCC0)) { 723 switch (inb(ioaddr + 0xcc0)) {
790 case(0x10): 724 case(0x10):
791 irq=5; 725 irq = 5;
792 break; 726 break;
793 case(0x20): 727 case(0x20):
794 irq=9; 728 irq = 9;
795 break; 729 break;
796 case(0x40): 730 case(0x40):
797 irq=10; 731 irq = 10;
798 break; 732 break;
799 case(0x80): 733 case(0x80):
800 irq=11; 734 irq = 11;
801 break; 735 break;
802 default: 736 default:
803 goto out; 737 goto out;
804 } 738 }
805 739
806 740
807 /* Setup the newly found eisa adapter */ 741 /* Setup the newly found eisa adapter */
808 rc = TLan_probe1( NULL, ioaddr, irq, 742 rc = tlan_probe1(NULL, ioaddr, irq,
809 12, NULL); 743 12, NULL);
810 continue; 744 continue;
811 745
812 out: 746out:
813 if (debug == 0x10) 747 if (debug == 0x10)
814 printk("None found\n"); 748 pr_info("None found\n");
815 continue; 749 continue;
816 750
817 out2: if (debug == 0x10) 751out2:
818 printk("Card found but it is not enabled, skipping\n"); 752 if (debug == 0x10)
819 continue; 753 pr_info("Card found but it is not enabled, skipping\n");
754 continue;
820 755
821 } 756 }
822 757
823} /* TLan_EisaProbe */ 758}
824 759
825#ifdef CONFIG_NET_POLL_CONTROLLER 760#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler with the device IRQ masked
 * so netconsole/netpoll can make progress without IRQ delivery. */
static void tlan_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	tlan_handle_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
832#endif 767#endif
833 768
/* net_device method table shared by all TLAN interfaces (PCI and EISA);
 * installed on the netdev in tlan_init(). */
static const struct net_device_ops tlan_netdev_ops = {
	.ndo_open = tlan_open,
	.ndo_stop = tlan_close,
	.ndo_start_xmit = tlan_start_tx,
	.ndo_tx_timeout = tlan_tx_timeout,
	.ndo_get_stats = tlan_get_stats,
	.ndo_set_multicast_list = tlan_set_multicast_list,
	.ndo_do_ioctl = tlan_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tlan_poll,
#endif
};
849 784
850 785
851 786
852 /*************************************************************** 787/***************************************************************
853 * TLan_Init 788 * tlan_init
854 * 789 *
855 * Returns: 790 * Returns:
856 * 0 on success, error code otherwise. 791 * 0 on success, error code otherwise.
857 * Parms: 792 * Parms:
858 * dev The structure of the device to be 793 * dev The structure of the device to be
859 * init'ed. 794 * init'ed.
860 * 795 *
861 * This function completes the initialization of the 796 * This function completes the initialization of the
862 * device structure and driver. It reserves the IO 797 * device structure and driver. It reserves the IO
863 * addresses, allocates memory for the lists and bounce 798 * addresses, allocates memory for the lists and bounce
864 * buffers, retrieves the MAC address from the eeprom 799 * buffers, retrieves the MAC address from the eeprom
865 * and assignes the device's methods. 800 * and assignes the device's methods.
866 * 801 *
867 **************************************************************/ 802 **************************************************************/
868 803
static int tlan_init(struct net_device *dev)
{
	int dma_size;
	int err;
	int i;
	struct tlan_priv *priv;

	priv = netdev_priv(dev);

	/* one contiguous coherent-DMA block holds both the rx and the
	 * tx descriptor-list arrays */
	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
		* (sizeof(struct tlan_list));
	priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
						 dma_size,
						 &priv->dma_storage_dma);
	priv->dma_size = dma_size;

	if (priv->dma_storage == NULL) {
		pr_err("Could not allocate lists and buffers for %s\n",
		       dev->name);
		return -ENOMEM;
	}
	memset(priv->dma_storage, 0, dma_size);
	/* descriptors are laid out 8-byte aligned; NOTE(review): the bus
	 * address is aligned independently of the CPU address, which
	 * assumes both share the same misalignment — confirm */
	priv->rx_list = (struct tlan_list *)
		ALIGN((unsigned long)priv->dma_storage, 8);
	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
	priv->tx_list_dma =
		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;

	/* MAC address bytes live at an adapter-specific EEPROM offset;
	 * an EEPROM read failure is reported but not fatal */
	err = 0;
	for (i = 0; i < 6 ; i++)
		err |= tlan_ee_read_byte(dev,
					 (u8) priv->adapter->addr_ofs + i,
					 (u8 *) &dev->dev_addr[i]);
	if (err) {
		pr_err("%s: Error reading MAC from eeprom: %d\n",
		       dev->name, err);
	}
	dev->addr_len = 6;

	netif_carrier_off(dev);

	/* Device methods */
	dev->netdev_ops = &tlan_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;

}
916 853
917 854
918 855
919 856
920 /*************************************************************** 857/***************************************************************
921 * TLan_Open 858 * tlan_open
922 * 859 *
923 * Returns: 860 * Returns:
924 * 0 on success, error code otherwise. 861 * 0 on success, error code otherwise.
925 * Parms: 862 * Parms:
926 * dev Structure of device to be opened. 863 * dev Structure of device to be opened.
927 * 864 *
928 * This routine puts the driver and TLAN adapter in a 865 * This routine puts the driver and TLAN adapter in a
929 * state where it is ready to send and receive packets. 866 * state where it is ready to send and receive packets.
930 * It allocates the IRQ, resets and brings the adapter 867 * It allocates the IRQ, resets and brings the adapter
931 * out of reset, and allows interrupts. It also delays 868 * out of reset, and allows interrupts. It also delays
932 * the startup for autonegotiation or sends a Rx GO 869 * the startup for autonegotiation or sends a Rx GO
933 * command to the adapter, as appropriate. 870 * command to the adapter, as appropriate.
934 * 871 *
935 **************************************************************/ 872 **************************************************************/
936 873
static int tlan_open(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int err;

	/* cache the chip revision for later feature decisions/logging */
	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
	/* IRQF_SHARED: the line may be shared with other devices */
	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
			  dev->name, dev);

	if (err) {
		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
			   dev->irq);
		return err;
	}

	/* timer must be initialised before tlan_start(), which
	 * presumably arms it — see tlan_stop()'s timer teardown */
	init_timer(&priv->timer);

	tlan_start(dev);

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
		 dev->name, priv->tlan_rev);

	return 0;

}
968 899
969 900
970 901
971 /************************************************************** 902/**************************************************************
972 * TLan_ioctl 903 * tlan_ioctl
973 * 904 *
974 * Returns: 905 * Returns:
975 * 0 on success, error code otherwise 906 * 0 on success, error code otherwise
976 * Params: 907 * Params:
977 * dev structure of device to receive ioctl. 908 * dev structure of device to receive ioctl.
978 * 909 *
979 * rq ifreq structure to hold userspace data. 910 * rq ifreq structure to hold userspace data.
980 * 911 *
981 * cmd ioctl command. 912 * cmd ioctl command.
982 * 913 *
983 * 914 *
984 *************************************************************/ 915 *************************************************************/
985 916
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy = priv->phy[priv->phy_num];

	/* MII access is only meaningful once the PHY is up */
	if (!priv->phy_online)
		return -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:		/* get address of MII PHY in use. */
		data->phy_id = phy;
		/* fall through — GMIIPHY also returns the register value */

	case SIOCGMIIREG:		/* read MII PHY register. */
		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
				  data->reg_num & 0x1f, &data->val_out);
		return 0;


	case SIOCSMIIREG:		/* write MII PHY register. */
		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1014 945
1015 946
1016 /*************************************************************** 947/***************************************************************
1017 * TLan_tx_timeout 948 * tlan_tx_timeout
1018 * 949 *
1019 * Returns: nothing 950 * Returns: nothing
1020 * 951 *
1021 * Params: 952 * Params:
1022 * dev structure of device which timed out 953 * dev structure of device which timed out
1023 * during transmit. 954 * during transmit.
1024 * 955 *
1025 **************************************************************/ 956 **************************************************************/
1026 957
/* tlan_tx_timeout
 *	Watchdog handler: recover from a stalled transmitter by tearing
 *	down and rebuilding the rx/tx lists, discarding the chip stats
 *	and resetting the adapter, then letting the stack queue again.
 */
static void tlan_tx_timeout(struct net_device *dev)
{

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	/* Ok so we timed out, lets see what we can do about it...*/
	tlan_free_lists(dev);
	tlan_reset_lists(dev);
	/* TLAN_IGNORE: counters are discarded, not folded into stats */
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);

}
1041 972
1042 973
1043 /*************************************************************** 974/***************************************************************
1044 * TLan_tx_timeout_work 975 * tlan_tx_timeout_work
1045 * 976 *
1046 * Returns: nothing 977 * Returns: nothing
1047 * 978 *
1048 * Params: 979 * Params:
1049 * work work item of device which timed out 980 * work work item of device which timed out
1050 * 981 *
1051 **************************************************************/ 982 **************************************************************/
1052 983
1053static void TLan_tx_timeout_work(struct work_struct *work) 984static void tlan_tx_timeout_work(struct work_struct *work)
1054{ 985{
1055 TLanPrivateInfo *priv = 986 struct tlan_priv *priv =
1056 container_of(work, TLanPrivateInfo, tlan_tqueue); 987 container_of(work, struct tlan_priv, tlan_tqueue);
1057 988
1058 TLan_tx_timeout(priv->dev); 989 tlan_tx_timeout(priv->dev);
1059} 990}
1060 991
1061 992
1062 993
1063 /*************************************************************** 994/***************************************************************
1064 * TLan_StartTx 995 * tlan_start_tx
1065 * 996 *
1066 * Returns: 997 * Returns:
1067 * 0 on success, non-zero on failure. 998 * 0 on success, non-zero on failure.
1068 * Parms: 999 * Parms:
1069 * skb A pointer to the sk_buff containing the 1000 * skb A pointer to the sk_buff containing the
1070 * frame to be sent. 1001 * frame to be sent.
1071 * dev The device to send the data on. 1002 * dev The device to send the data on.
1072 * 1003 *
1073 * This function adds a frame to the Tx list to be sent 1004 * This function adds a frame to the Tx list to be sent
1074 * ASAP. First it verifies that the adapter is ready and 1005 * ASAP. First it verifies that the adapter is ready and
1075 * there is room in the queue. Then it sets up the next 1006 * there is room in the queue. Then it sets up the next
1076 * available list, copies the frame to the corresponding 1007 * available list, copies the frame to the corresponding
1077 * buffer. If the adapter Tx channel is idle, it gives 1008 * buffer. If the adapter Tx channel is idle, it gives
1078 * the adapter a Tx Go command on the list, otherwise it 1009 * the adapter a Tx Go command on the list, otherwise it
1079 * sets the forward address of the previous list to point 1010 * sets the forward address of the previous list to point
1080 * to this one. Then it frees the sk_buff. 1011 * to this one. Then it frees the sk_buff.
1081 * 1012 *
1082 **************************************************************/ 1013 **************************************************************/
1083 1014
1084static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) 1015static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
1085{ 1016{
1086 TLanPrivateInfo *priv = netdev_priv(dev); 1017 struct tlan_priv *priv = netdev_priv(dev);
1087 dma_addr_t tail_list_phys; 1018 dma_addr_t tail_list_phys;
1088 TLanList *tail_list; 1019 struct tlan_list *tail_list;
1089 unsigned long flags; 1020 unsigned long flags;
1090 unsigned int txlen; 1021 unsigned int txlen;
1091 1022
1092 if ( ! priv->phyOnline ) { 1023 if (!priv->phy_online) {
1093 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", 1024 TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
1094 dev->name ); 1025 dev->name);
1095 dev_kfree_skb_any(skb); 1026 dev_kfree_skb_any(skb);
1096 return NETDEV_TX_OK; 1027 return NETDEV_TX_OK;
1097 } 1028 }
@@ -1100,218 +1031,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1100 return NETDEV_TX_OK; 1031 return NETDEV_TX_OK;
1101 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE); 1032 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
1102 1033
1103 tail_list = priv->txList + priv->txTail; 1034 tail_list = priv->tx_list + priv->tx_tail;
1104 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1035 tail_list_phys =
1036 priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
1105 1037
1106 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { 1038 if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
1107 TLAN_DBG( TLAN_DEBUG_TX, 1039 TLAN_DBG(TLAN_DEBUG_TX,
1108 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", 1040 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
1109 dev->name, priv->txHead, priv->txTail ); 1041 dev->name, priv->tx_head, priv->tx_tail);
1110 netif_stop_queue(dev); 1042 netif_stop_queue(dev);
1111 priv->txBusyCount++; 1043 priv->tx_busy_count++;
1112 return NETDEV_TX_BUSY; 1044 return NETDEV_TX_BUSY;
1113 } 1045 }
1114 1046
1115 tail_list->forward = 0; 1047 tail_list->forward = 0;
1116 1048
1117 tail_list->buffer[0].address = pci_map_single(priv->pciDev, 1049 tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
1118 skb->data, txlen, 1050 skb->data, txlen,
1119 PCI_DMA_TODEVICE); 1051 PCI_DMA_TODEVICE);
1120 TLan_StoreSKB(tail_list, skb); 1052 tlan_store_skb(tail_list, skb);
1121 1053
1122 tail_list->frameSize = (u16) txlen; 1054 tail_list->frame_size = (u16) txlen;
1123 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; 1055 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
1124 tail_list->buffer[1].count = 0; 1056 tail_list->buffer[1].count = 0;
1125 tail_list->buffer[1].address = 0; 1057 tail_list->buffer[1].address = 0;
1126 1058
1127 spin_lock_irqsave(&priv->lock, flags); 1059 spin_lock_irqsave(&priv->lock, flags);
1128 tail_list->cStat = TLAN_CSTAT_READY; 1060 tail_list->c_stat = TLAN_CSTAT_READY;
1129 if ( ! priv->txInProgress ) { 1061 if (!priv->tx_in_progress) {
1130 priv->txInProgress = 1; 1062 priv->tx_in_progress = 1;
1131 TLAN_DBG( TLAN_DEBUG_TX, 1063 TLAN_DBG(TLAN_DEBUG_TX,
1132 "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1064 "TRANSMIT: Starting TX on buffer %d\n",
1133 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); 1065 priv->tx_tail);
1134 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); 1066 outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
1067 outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
1135 } else { 1068 } else {
1136 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", 1069 TLAN_DBG(TLAN_DEBUG_TX,
1137 priv->txTail ); 1070 "TRANSMIT: Adding buffer %d to TX channel\n",
1138 if ( priv->txTail == 0 ) { 1071 priv->tx_tail);
1139 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward 1072 if (priv->tx_tail == 0) {
1073 (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
1140 = tail_list_phys; 1074 = tail_list_phys;
1141 } else { 1075 } else {
1142 ( priv->txList + ( priv->txTail - 1 ) )->forward 1076 (priv->tx_list + (priv->tx_tail - 1))->forward
1143 = tail_list_phys; 1077 = tail_list_phys;
1144 } 1078 }
1145 } 1079 }
1146 spin_unlock_irqrestore(&priv->lock, flags); 1080 spin_unlock_irqrestore(&priv->lock, flags);
1147 1081
1148 CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); 1082 CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
1149 1083
1150 return NETDEV_TX_OK; 1084 return NETDEV_TX_OK;
1151 1085
1152} /* TLan_StartTx */ 1086}
1153 1087
1154 1088
1155 1089
1156 1090
1157 /*************************************************************** 1091/***************************************************************
1158 * TLan_HandleInterrupt 1092 * tlan_handle_interrupt
1159 * 1093 *
1160 * Returns: 1094 * Returns:
1161 * Nothing 1095 * Nothing
1162 * Parms: 1096 * Parms:
1163 * irq The line on which the interrupt 1097 * irq The line on which the interrupt
1164 * occurred. 1098 * occurred.
1165 * dev_id A pointer to the device assigned to 1099 * dev_id A pointer to the device assigned to
1166 * this irq line. 1100 * this irq line.
1167 * 1101 *
1168 * This function handles an interrupt generated by its 1102 * This function handles an interrupt generated by its
1169 * assigned TLAN adapter. The function deactivates 1103 * assigned TLAN adapter. The function deactivates
1170 * interrupts on its adapter, records the type of 1104 * interrupts on its adapter, records the type of
1171 * interrupt, executes the appropriate subhandler, and 1105 * interrupt, executes the appropriate subhandler, and
1172 * acknowdges the interrupt to the adapter (thus 1106 * acknowdges the interrupt to the adapter (thus
1173 * re-enabling adapter interrupts. 1107 * re-enabling adapter interrupts.
1174 * 1108 *
1175 **************************************************************/ 1109 **************************************************************/
1176 1110
1177static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id) 1111static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
1178{ 1112{
1179 struct net_device *dev = dev_id; 1113 struct net_device *dev = dev_id;
1180 TLanPrivateInfo *priv = netdev_priv(dev); 1114 struct tlan_priv *priv = netdev_priv(dev);
1181 u16 host_int; 1115 u16 host_int;
1182 u16 type; 1116 u16 type;
1183 1117
1184 spin_lock(&priv->lock); 1118 spin_lock(&priv->lock);
1185 1119
1186 host_int = inw( dev->base_addr + TLAN_HOST_INT ); 1120 host_int = inw(dev->base_addr + TLAN_HOST_INT);
1187 type = ( host_int & TLAN_HI_IT_MASK ) >> 2; 1121 type = (host_int & TLAN_HI_IT_MASK) >> 2;
1188 if ( type ) { 1122 if (type) {
1189 u32 ack; 1123 u32 ack;
1190 u32 host_cmd; 1124 u32 host_cmd;
1191 1125
1192 outw( host_int, dev->base_addr + TLAN_HOST_INT ); 1126 outw(host_int, dev->base_addr + TLAN_HOST_INT);
1193 ack = TLanIntVector[type]( dev, host_int ); 1127 ack = tlan_int_vector[type](dev, host_int);
1194 1128
1195 if ( ack ) { 1129 if (ack) {
1196 host_cmd = TLAN_HC_ACK | ack | ( type << 18 ); 1130 host_cmd = TLAN_HC_ACK | ack | (type << 18);
1197 outl( host_cmd, dev->base_addr + TLAN_HOST_CMD ); 1131 outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
1198 } 1132 }
1199 } 1133 }
1200 1134
1201 spin_unlock(&priv->lock); 1135 spin_unlock(&priv->lock);
1202 1136
1203 return IRQ_RETVAL(type); 1137 return IRQ_RETVAL(type);
1204} /* TLan_HandleInterrupts */ 1138}
1205 1139
1206 1140
1207 1141
1208 1142
1209 /*************************************************************** 1143/***************************************************************
1210 * TLan_Close 1144 * tlan_close
1211 * 1145 *
1212 * Returns: 1146 * Returns:
1213 * An error code. 1147 * An error code.
1214 * Parms: 1148 * Parms:
1215 * dev The device structure of the device to 1149 * dev The device structure of the device to
1216 * close. 1150 * close.
1217 * 1151 *
1218 * This function shuts down the adapter. It records any 1152 * This function shuts down the adapter. It records any
1219 * stats, puts the adapter into reset state, deactivates 1153 * stats, puts the adapter into reset state, deactivates
1220 * its time as needed, and frees the irq it is using. 1154 * its time as needed, and frees the irq it is using.
1221 * 1155 *
1222 **************************************************************/ 1156 **************************************************************/
1223 1157
1224static int TLan_Close(struct net_device *dev) 1158static int tlan_close(struct net_device *dev)
1225{ 1159{
1226 TLanPrivateInfo *priv = netdev_priv(dev); 1160 struct tlan_priv *priv = netdev_priv(dev);
1227 1161
1228 netif_stop_queue(dev);
1229 priv->neg_be_verbose = 0; 1162 priv->neg_be_verbose = 0;
1163 tlan_stop(dev);
1230 1164
1231 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1165 free_irq(dev->irq, dev);
1232 outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); 1166 tlan_free_lists(dev);
1233 if ( priv->timer.function != NULL ) { 1167 TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
1234 del_timer_sync( &priv->timer );
1235 priv->timer.function = NULL;
1236 }
1237
1238 free_irq( dev->irq, dev );
1239 TLan_FreeLists( dev );
1240 TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
1241 1168
1242 return 0; 1169 return 0;
1243 1170
1244} /* TLan_Close */ 1171}
1245 1172
1246 1173
1247 1174
1248 1175
1249 /*************************************************************** 1176/***************************************************************
1250 * TLan_GetStats 1177 * tlan_get_stats
1251 * 1178 *
1252 * Returns: 1179 * Returns:
1253 * A pointer to the device's statistics structure. 1180 * A pointer to the device's statistics structure.
1254 * Parms: 1181 * Parms:
1255 * dev The device structure to return the 1182 * dev The device structure to return the
1256 * stats for. 1183 * stats for.
1257 * 1184 *
1258 * This function updates the devices statistics by reading 1185 * This function updates the devices statistics by reading
1259 * the TLAN chip's onboard registers. Then it returns the 1186 * the TLAN chip's onboard registers. Then it returns the
1260 * address of the statistics structure. 1187 * address of the statistics structure.
1261 * 1188 *
1262 **************************************************************/ 1189 **************************************************************/
1263 1190
1264static struct net_device_stats *TLan_GetStats( struct net_device *dev ) 1191static struct net_device_stats *tlan_get_stats(struct net_device *dev)
1265{ 1192{
1266 TLanPrivateInfo *priv = netdev_priv(dev); 1193 struct tlan_priv *priv = netdev_priv(dev);
1267 int i; 1194 int i;
1268 1195
1269 /* Should only read stats if open ? */ 1196 /* Should only read stats if open ? */
1270 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1197 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1271 1198
1272 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, 1199 TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
1273 priv->rxEocCount ); 1200 priv->rx_eoc_count);
1274 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, 1201 TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
1275 priv->txBusyCount ); 1202 priv->tx_busy_count);
1276 if ( debug & TLAN_DEBUG_GNRL ) { 1203 if (debug & TLAN_DEBUG_GNRL) {
1277 TLan_PrintDio( dev->base_addr ); 1204 tlan_print_dio(dev->base_addr);
1278 TLan_PhyPrint( dev ); 1205 tlan_phy_print(dev);
1279 } 1206 }
1280 if ( debug & TLAN_DEBUG_LIST ) { 1207 if (debug & TLAN_DEBUG_LIST) {
1281 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) 1208 for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
1282 TLan_PrintList( priv->rxList + i, "RX", i ); 1209 tlan_print_list(priv->rx_list + i, "RX", i);
1283 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) 1210 for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
1284 TLan_PrintList( priv->txList + i, "TX", i ); 1211 tlan_print_list(priv->tx_list + i, "TX", i);
1285 } 1212 }
1286 1213
1287 return &dev->stats; 1214 return &dev->stats;
1288 1215
1289} /* TLan_GetStats */ 1216}
1290 1217
1291 1218
1292 1219
1293 1220
1294 /*************************************************************** 1221/***************************************************************
1295 * TLan_SetMulticastList 1222 * tlan_set_multicast_list
1296 * 1223 *
1297 * Returns: 1224 * Returns:
1298 * Nothing 1225 * Nothing
1299 * Parms: 1226 * Parms:
1300 * dev The device structure to set the 1227 * dev The device structure to set the
1301 * multicast list for. 1228 * multicast list for.
1302 * 1229 *
1303 * This function sets the TLAN adaptor to various receive 1230 * This function sets the TLAN adaptor to various receive
1304 * modes. If the IFF_PROMISC flag is set, promiscuous 1231 * modes. If the IFF_PROMISC flag is set, promiscuous
1305 * mode is acitviated. Otherwise, promiscuous mode is 1232 * mode is acitviated. Otherwise, promiscuous mode is
1306 * turned off. If the IFF_ALLMULTI flag is set, then 1233 * turned off. If the IFF_ALLMULTI flag is set, then
1307 * the hash table is set to receive all group addresses. 1234 * the hash table is set to receive all group addresses.
1308 * Otherwise, the first three multicast addresses are 1235 * Otherwise, the first three multicast addresses are
1309 * stored in AREG_1-3, and the rest are selected via the 1236 * stored in AREG_1-3, and the rest are selected via the
1310 * hash table, as necessary. 1237 * hash table, as necessary.
1311 * 1238 *
1312 **************************************************************/ 1239 **************************************************************/
1313 1240
1314static void TLan_SetMulticastList( struct net_device *dev ) 1241static void tlan_set_multicast_list(struct net_device *dev)
1315{ 1242{
1316 struct netdev_hw_addr *ha; 1243 struct netdev_hw_addr *ha;
1317 u32 hash1 = 0; 1244 u32 hash1 = 0;
@@ -1320,53 +1247,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
1320 u32 offset; 1247 u32 offset;
1321 u8 tmp; 1248 u8 tmp;
1322 1249
1323 if ( dev->flags & IFF_PROMISC ) { 1250 if (dev->flags & IFF_PROMISC) {
1324 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1251 tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
1325 TLan_DioWrite8( dev->base_addr, 1252 tlan_dio_write8(dev->base_addr,
1326 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); 1253 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
1327 } else { 1254 } else {
1328 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1255 tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
1329 TLan_DioWrite8( dev->base_addr, 1256 tlan_dio_write8(dev->base_addr,
1330 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); 1257 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
1331 if ( dev->flags & IFF_ALLMULTI ) { 1258 if (dev->flags & IFF_ALLMULTI) {
1332 for ( i = 0; i < 3; i++ ) 1259 for (i = 0; i < 3; i++)
1333 TLan_SetMac( dev, i + 1, NULL ); 1260 tlan_set_mac(dev, i + 1, NULL);
1334 TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF ); 1261 tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
1335 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); 1262 0xffffffff);
1263 tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
1264 0xffffffff);
1336 } else { 1265 } else {
1337 i = 0; 1266 i = 0;
1338 netdev_for_each_mc_addr(ha, dev) { 1267 netdev_for_each_mc_addr(ha, dev) {
1339 if ( i < 3 ) { 1268 if (i < 3) {
1340 TLan_SetMac( dev, i + 1, 1269 tlan_set_mac(dev, i + 1,
1341 (char *) &ha->addr); 1270 (char *) &ha->addr);
1342 } else { 1271 } else {
1343 offset = TLan_HashFunc((u8 *)&ha->addr); 1272 offset =
1344 if ( offset < 32 ) 1273 tlan_hash_func((u8 *)&ha->addr);
1345 hash1 |= ( 1 << offset ); 1274 if (offset < 32)
1275 hash1 |= (1 << offset);
1346 else 1276 else
1347 hash2 |= ( 1 << ( offset - 32 ) ); 1277 hash2 |= (1 << (offset - 32));
1348 } 1278 }
1349 i++; 1279 i++;
1350 } 1280 }
1351 for ( ; i < 3; i++ ) 1281 for ( ; i < 3; i++)
1352 TLan_SetMac( dev, i + 1, NULL ); 1282 tlan_set_mac(dev, i + 1, NULL);
1353 TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 ); 1283 tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
1354 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 ); 1284 tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
1355 } 1285 }
1356 } 1286 }
1357 1287
1358} /* TLan_SetMulticastList */ 1288}
1359 1289
1360 1290
1361 1291
1362/***************************************************************************** 1292/*****************************************************************************
1363****************************************************************************** 1293******************************************************************************
1364 1294
1365 ThunderLAN Driver Interrupt Vectors and Table 1295ThunderLAN driver interrupt vectors and table
1366 1296
1367 Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN 1297please see chap. 4, "Interrupt Handling" of the "ThunderLAN
1368 Programmer's Guide" for more informations on handling interrupts 1298Programmer's Guide" for more informations on handling interrupts
1369 generated by TLAN based adapters. 1299generated by TLAN based adapters.
1370 1300
1371****************************************************************************** 1301******************************************************************************
1372*****************************************************************************/ 1302*****************************************************************************/
@@ -1374,46 +1304,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
1374 1304
1375 1305
1376 1306
1377 /*************************************************************** 1307/***************************************************************
1378 * TLan_HandleTxEOF 1308 * tlan_handle_tx_eof
1379 * 1309 *
1380 * Returns: 1310 * Returns:
1381 * 1 1311 * 1
1382 * Parms: 1312 * Parms:
1383 * dev Device assigned the IRQ that was 1313 * dev Device assigned the IRQ that was
1384 * raised. 1314 * raised.
1385 * host_int The contents of the HOST_INT 1315 * host_int The contents of the HOST_INT
1386 * port. 1316 * port.
1387 * 1317 *
1388 * This function handles Tx EOF interrupts which are raised 1318 * This function handles Tx EOF interrupts which are raised
1389 * by the adapter when it has completed sending the 1319 * by the adapter when it has completed sending the
1390 * contents of a buffer. If detemines which list/buffer 1320 * contents of a buffer. If detemines which list/buffer
1391 * was completed and resets it. If the buffer was the last 1321 * was completed and resets it. If the buffer was the last
1392 * in the channel (EOC), then the function checks to see if 1322 * in the channel (EOC), then the function checks to see if
1393 * another buffer is ready to send, and if so, sends a Tx 1323 * another buffer is ready to send, and if so, sends a Tx
1394 * Go command. Finally, the driver activates/continues the 1324 * Go command. Finally, the driver activates/continues the
1395 * activity LED. 1325 * activity LED.
1396 * 1326 *
1397 **************************************************************/ 1327 **************************************************************/
1398 1328
1399static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) 1329static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
1400{ 1330{
1401 TLanPrivateInfo *priv = netdev_priv(dev); 1331 struct tlan_priv *priv = netdev_priv(dev);
1402 int eoc = 0; 1332 int eoc = 0;
1403 TLanList *head_list; 1333 struct tlan_list *head_list;
1404 dma_addr_t head_list_phys; 1334 dma_addr_t head_list_phys;
1405 u32 ack = 0; 1335 u32 ack = 0;
1406 u16 tmpCStat; 1336 u16 tmp_c_stat;
1407 1337
1408 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", 1338 TLAN_DBG(TLAN_DEBUG_TX,
1409 priv->txHead, priv->txTail ); 1339 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
1410 head_list = priv->txList + priv->txHead; 1340 priv->tx_head, priv->tx_tail);
1341 head_list = priv->tx_list + priv->tx_head;
1411 1342
1412 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1343 while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
1413 struct sk_buff *skb = TLan_GetSKB(head_list); 1344 && (ack < 255)) {
1345 struct sk_buff *skb = tlan_get_skb(head_list);
1414 1346
1415 ack++; 1347 ack++;
1416 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, 1348 pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
1417 max(skb->len, 1349 max(skb->len,
1418 (unsigned int)TLAN_MIN_FRAME_SIZE), 1350 (unsigned int)TLAN_MIN_FRAME_SIZE),
1419 PCI_DMA_TODEVICE); 1351 PCI_DMA_TODEVICE);
@@ -1421,304 +1353,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1421 head_list->buffer[8].address = 0; 1353 head_list->buffer[8].address = 0;
1422 head_list->buffer[9].address = 0; 1354 head_list->buffer[9].address = 0;
1423 1355
1424 if ( tmpCStat & TLAN_CSTAT_EOC ) 1356 if (tmp_c_stat & TLAN_CSTAT_EOC)
1425 eoc = 1; 1357 eoc = 1;
1426 1358
1427 dev->stats.tx_bytes += head_list->frameSize; 1359 dev->stats.tx_bytes += head_list->frame_size;
1428 1360
1429 head_list->cStat = TLAN_CSTAT_UNUSED; 1361 head_list->c_stat = TLAN_CSTAT_UNUSED;
1430 netif_start_queue(dev); 1362 netif_start_queue(dev);
1431 CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS ); 1363 CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
1432 head_list = priv->txList + priv->txHead; 1364 head_list = priv->tx_list + priv->tx_head;
1433 } 1365 }
1434 1366
1435 if (!ack) 1367 if (!ack)
1436 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); 1368 netdev_info(dev,
1437 1369 "Received interrupt for uncompleted TX frame\n");
1438 if ( eoc ) { 1370
1439 TLAN_DBG( TLAN_DEBUG_TX, 1371 if (eoc) {
1440 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", 1372 TLAN_DBG(TLAN_DEBUG_TX,
1441 priv->txHead, priv->txTail ); 1373 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
1442 head_list = priv->txList + priv->txHead; 1374 priv->tx_head, priv->tx_tail);
1443 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1375 head_list = priv->tx_list + priv->tx_head;
1444 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1376 head_list_phys = priv->tx_list_dma
1445 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1377 + sizeof(struct tlan_list)*priv->tx_head;
1378 if ((head_list->c_stat & TLAN_CSTAT_READY)
1379 == TLAN_CSTAT_READY) {
1380 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1446 ack |= TLAN_HC_GO; 1381 ack |= TLAN_HC_GO;
1447 } else { 1382 } else {
1448 priv->txInProgress = 0; 1383 priv->tx_in_progress = 0;
1449 } 1384 }
1450 } 1385 }
1451 1386
1452 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1387 if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
1453 TLan_DioWrite8( dev->base_addr, 1388 tlan_dio_write8(dev->base_addr,
1454 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1389 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
1455 if ( priv->timer.function == NULL ) { 1390 if (priv->timer.function == NULL) {
1456 priv->timer.function = TLan_Timer; 1391 priv->timer.function = tlan_timer;
1457 priv->timer.data = (unsigned long) dev; 1392 priv->timer.data = (unsigned long) dev;
1458 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; 1393 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
1459 priv->timerSetAt = jiffies; 1394 priv->timer_set_at = jiffies;
1460 priv->timerType = TLAN_TIMER_ACTIVITY; 1395 priv->timer_type = TLAN_TIMER_ACTIVITY;
1461 add_timer(&priv->timer); 1396 add_timer(&priv->timer);
1462 } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { 1397 } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
1463 priv->timerSetAt = jiffies; 1398 priv->timer_set_at = jiffies;
1464 } 1399 }
1465 } 1400 }
1466 1401
1467 return ack; 1402 return ack;
1468 1403
1469} /* TLan_HandleTxEOF */ 1404}
1470 1405
1471 1406
1472 1407
1473 1408
1474 /*************************************************************** 1409/***************************************************************
1475 * TLan_HandleStatOverflow 1410 * TLan_HandleStatOverflow
1476 * 1411 *
1477 * Returns: 1412 * Returns:
1478 * 1 1413 * 1
1479 * Parms: 1414 * Parms:
1480 * dev Device assigned the IRQ that was 1415 * dev Device assigned the IRQ that was
1481 * raised. 1416 * raised.
1482 * host_int The contents of the HOST_INT 1417 * host_int The contents of the HOST_INT
1483 * port. 1418 * port.
1484 * 1419 *
1485 * This function handles the Statistics Overflow interrupt 1420 * This function handles the Statistics Overflow interrupt
1486 * which means that one or more of the TLAN statistics 1421 * which means that one or more of the TLAN statistics
1487 * registers has reached 1/2 capacity and needs to be read. 1422 * registers has reached 1/2 capacity and needs to be read.
1488 * 1423 *
1489 **************************************************************/ 1424 **************************************************************/
1490 1425
1491static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int ) 1426static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
1492{ 1427{
1493 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1428 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1494 1429
1495 return 1; 1430 return 1;
1496 1431
1497} /* TLan_HandleStatOverflow */ 1432}
1498 1433
1499 1434
1500 1435
1501 1436
1502 /*************************************************************** 1437/***************************************************************
1503 * TLan_HandleRxEOF 1438 * TLan_HandleRxEOF
1504 * 1439 *
1505 * Returns: 1440 * Returns:
1506 * 1 1441 * 1
1507 * Parms: 1442 * Parms:
1508 * dev Device assigned the IRQ that was 1443 * dev Device assigned the IRQ that was
1509 * raised. 1444 * raised.
1510 * host_int The contents of the HOST_INT 1445 * host_int The contents of the HOST_INT
1511 * port. 1446 * port.
1512 * 1447 *
1513 * This function handles the Rx EOF interrupt which 1448 * This function handles the Rx EOF interrupt which
1514 * indicates a frame has been received by the adapter from 1449 * indicates a frame has been received by the adapter from
1515 * the net and the frame has been transferred to memory. 1450 * the net and the frame has been transferred to memory.
1516 * The function determines the bounce buffer the frame has 1451 * The function determines the bounce buffer the frame has
1517 * been loaded into, creates a new sk_buff big enough to 1452 * been loaded into, creates a new sk_buff big enough to
1518 * hold the frame, and sends it to protocol stack. It 1453 * hold the frame, and sends it to protocol stack. It
1519 * then resets the used buffer and appends it to the end 1454 * then resets the used buffer and appends it to the end
1520 * of the list. If the frame was the last in the Rx 1455 * of the list. If the frame was the last in the Rx
1521 * channel (EOC), the function restarts the receive channel 1456 * channel (EOC), the function restarts the receive channel
1522 * by sending an Rx Go command to the adapter. Then it 1457 * by sending an Rx Go command to the adapter. Then it
1523 * activates/continues the activity LED. 1458 * activates/continues the activity LED.
1524 * 1459 *
1525 **************************************************************/ 1460 **************************************************************/
1526 1461
1527static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) 1462static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
1528{ 1463{
1529 TLanPrivateInfo *priv = netdev_priv(dev); 1464 struct tlan_priv *priv = netdev_priv(dev);
1530 u32 ack = 0; 1465 u32 ack = 0;
1531 int eoc = 0; 1466 int eoc = 0;
1532 TLanList *head_list; 1467 struct tlan_list *head_list;
1533 struct sk_buff *skb; 1468 struct sk_buff *skb;
1534 TLanList *tail_list; 1469 struct tlan_list *tail_list;
1535 u16 tmpCStat; 1470 u16 tmp_c_stat;
1536 dma_addr_t head_list_phys; 1471 dma_addr_t head_list_phys;
1537 1472
1538 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", 1473 TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
1539 priv->rxHead, priv->rxTail ); 1474 priv->rx_head, priv->rx_tail);
1540 head_list = priv->rxList + priv->rxHead; 1475 head_list = priv->rx_list + priv->rx_head;
1541 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1476 head_list_phys =
1477 priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
1542 1478
1543 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1479 while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
1544 dma_addr_t frameDma = head_list->buffer[0].address; 1480 && (ack < 255)) {
1545 u32 frameSize = head_list->frameSize; 1481 dma_addr_t frame_dma = head_list->buffer[0].address;
1482 u32 frame_size = head_list->frame_size;
1546 struct sk_buff *new_skb; 1483 struct sk_buff *new_skb;
1547 1484
1548 ack++; 1485 ack++;
1549 if (tmpCStat & TLAN_CSTAT_EOC) 1486 if (tmp_c_stat & TLAN_CSTAT_EOC)
1550 eoc = 1; 1487 eoc = 1;
1551 1488
1552 new_skb = netdev_alloc_skb_ip_align(dev, 1489 new_skb = netdev_alloc_skb_ip_align(dev,
1553 TLAN_MAX_FRAME_SIZE + 5); 1490 TLAN_MAX_FRAME_SIZE + 5);
1554 if ( !new_skb ) 1491 if (!new_skb)
1555 goto drop_and_reuse; 1492 goto drop_and_reuse;
1556 1493
1557 skb = TLan_GetSKB(head_list); 1494 skb = tlan_get_skb(head_list);
1558 pci_unmap_single(priv->pciDev, frameDma, 1495 pci_unmap_single(priv->pci_dev, frame_dma,
1559 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 1496 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1560 skb_put( skb, frameSize ); 1497 skb_put(skb, frame_size);
1561 1498
1562 dev->stats.rx_bytes += frameSize; 1499 dev->stats.rx_bytes += frame_size;
1563 1500
1564 skb->protocol = eth_type_trans( skb, dev ); 1501 skb->protocol = eth_type_trans(skb, dev);
1565 netif_rx( skb ); 1502 netif_rx(skb);
1566 1503
1567 head_list->buffer[0].address = pci_map_single(priv->pciDev, 1504 head_list->buffer[0].address =
1568 new_skb->data, 1505 pci_map_single(priv->pci_dev, new_skb->data,
1569 TLAN_MAX_FRAME_SIZE, 1506 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1570 PCI_DMA_FROMDEVICE);
1571 1507
1572 TLan_StoreSKB(head_list, new_skb); 1508 tlan_store_skb(head_list, new_skb);
1573drop_and_reuse: 1509drop_and_reuse:
1574 head_list->forward = 0; 1510 head_list->forward = 0;
1575 head_list->cStat = 0; 1511 head_list->c_stat = 0;
1576 tail_list = priv->rxList + priv->rxTail; 1512 tail_list = priv->rx_list + priv->rx_tail;
1577 tail_list->forward = head_list_phys; 1513 tail_list->forward = head_list_phys;
1578 1514
1579 CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS ); 1515 CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
1580 CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS ); 1516 CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
1581 head_list = priv->rxList + priv->rxHead; 1517 head_list = priv->rx_list + priv->rx_head;
1582 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1518 head_list_phys = priv->rx_list_dma
1519 + sizeof(struct tlan_list)*priv->rx_head;
1583 } 1520 }
1584 1521
1585 if (!ack) 1522 if (!ack)
1586 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); 1523 netdev_info(dev,
1587 1524 "Received interrupt for uncompleted RX frame\n");
1588 1525
1589 if ( eoc ) { 1526
1590 TLAN_DBG( TLAN_DEBUG_RX, 1527 if (eoc) {
1591 "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", 1528 TLAN_DBG(TLAN_DEBUG_RX,
1592 priv->rxHead, priv->rxTail ); 1529 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
1593 head_list = priv->rxList + priv->rxHead; 1530 priv->rx_head, priv->rx_tail);
1594 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1531 head_list = priv->rx_list + priv->rx_head;
1595 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1532 head_list_phys = priv->rx_list_dma
1533 + sizeof(struct tlan_list)*priv->rx_head;
1534 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1596 ack |= TLAN_HC_GO | TLAN_HC_RT; 1535 ack |= TLAN_HC_GO | TLAN_HC_RT;
1597 priv->rxEocCount++; 1536 priv->rx_eoc_count++;
1598 } 1537 }
1599 1538
1600 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1539 if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
1601 TLan_DioWrite8( dev->base_addr, 1540 tlan_dio_write8(dev->base_addr,
1602 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1541 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
1603 if ( priv->timer.function == NULL ) { 1542 if (priv->timer.function == NULL) {
1604 priv->timer.function = TLan_Timer; 1543 priv->timer.function = tlan_timer;
1605 priv->timer.data = (unsigned long) dev; 1544 priv->timer.data = (unsigned long) dev;
1606 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; 1545 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
1607 priv->timerSetAt = jiffies; 1546 priv->timer_set_at = jiffies;
1608 priv->timerType = TLAN_TIMER_ACTIVITY; 1547 priv->timer_type = TLAN_TIMER_ACTIVITY;
1609 add_timer(&priv->timer); 1548 add_timer(&priv->timer);
1610 } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { 1549 } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
1611 priv->timerSetAt = jiffies; 1550 priv->timer_set_at = jiffies;
1612 } 1551 }
1613 } 1552 }
1614 1553
1615 return ack; 1554 return ack;
1616 1555
1617} /* TLan_HandleRxEOF */ 1556}
1618 1557
1619 1558
1620 1559
1621 1560
1622 /*************************************************************** 1561/***************************************************************
1623 * TLan_HandleDummy 1562 * tlan_handle_dummy
1624 * 1563 *
1625 * Returns: 1564 * Returns:
1626 * 1 1565 * 1
1627 * Parms: 1566 * Parms:
1628 * dev Device assigned the IRQ that was 1567 * dev Device assigned the IRQ that was
1629 * raised. 1568 * raised.
1630 * host_int The contents of the HOST_INT 1569 * host_int The contents of the HOST_INT
1631 * port. 1570 * port.
1632 * 1571 *
1633 * This function handles the Dummy interrupt, which is 1572 * This function handles the Dummy interrupt, which is
1634 * raised whenever a test interrupt is generated by setting 1573 * raised whenever a test interrupt is generated by setting
1635 * the Req_Int bit of HOST_CMD to 1. 1574 * the Req_Int bit of HOST_CMD to 1.
1636 * 1575 *
1637 **************************************************************/ 1576 **************************************************************/
1638 1577
1639static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int ) 1578static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
1640{ 1579{
1641 printk( "TLAN: Test interrupt on %s.\n", dev->name ); 1580 netdev_info(dev, "Test interrupt\n");
1642 return 1; 1581 return 1;
1643 1582
1644} /* TLan_HandleDummy */ 1583}
1645 1584
1646 1585
1647 1586
1648 1587
1649 /*************************************************************** 1588/***************************************************************
1650 * TLan_HandleTxEOC 1589 * tlan_handle_tx_eoc
1651 * 1590 *
1652 * Returns: 1591 * Returns:
1653 * 1 1592 * 1
1654 * Parms: 1593 * Parms:
1655 * dev Device assigned the IRQ that was 1594 * dev Device assigned the IRQ that was
1656 * raised. 1595 * raised.
1657 * host_int The contents of the HOST_INT 1596 * host_int The contents of the HOST_INT
1658 * port. 1597 * port.
1659 * 1598 *
1660 * This driver is structured to determine EOC occurrences by 1599 * This driver is structured to determine EOC occurrences by
1661 * reading the CSTAT member of the list structure. Tx EOC 1600 * reading the CSTAT member of the list structure. Tx EOC
1662 * interrupts are disabled via the DIO INTDIS register. 1601 * interrupts are disabled via the DIO INTDIS register.
1663 * However, TLAN chips before revision 3.0 didn't have this 1602 * However, TLAN chips before revision 3.0 didn't have this
1664 * functionality, so process EOC events if this is the 1603 * functionality, so process EOC events if this is the
1665 * case. 1604 * case.
1666 * 1605 *
1667 **************************************************************/ 1606 **************************************************************/
1668 1607
1669static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int ) 1608static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
1670{ 1609{
1671 TLanPrivateInfo *priv = netdev_priv(dev); 1610 struct tlan_priv *priv = netdev_priv(dev);
1672 TLanList *head_list; 1611 struct tlan_list *head_list;
1673 dma_addr_t head_list_phys; 1612 dma_addr_t head_list_phys;
1674 u32 ack = 1; 1613 u32 ack = 1;
1675 1614
1676 host_int = 0; 1615 host_int = 0;
1677 if ( priv->tlanRev < 0x30 ) { 1616 if (priv->tlan_rev < 0x30) {
1678 TLAN_DBG( TLAN_DEBUG_TX, 1617 TLAN_DBG(TLAN_DEBUG_TX,
1679 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", 1618 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
1680 priv->txHead, priv->txTail ); 1619 priv->tx_head, priv->tx_tail);
1681 head_list = priv->txList + priv->txHead; 1620 head_list = priv->tx_list + priv->tx_head;
1682 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1621 head_list_phys = priv->tx_list_dma
1683 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1622 + sizeof(struct tlan_list)*priv->tx_head;
1623 if ((head_list->c_stat & TLAN_CSTAT_READY)
1624 == TLAN_CSTAT_READY) {
1684 netif_stop_queue(dev); 1625 netif_stop_queue(dev);
1685 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1626 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1686 ack |= TLAN_HC_GO; 1627 ack |= TLAN_HC_GO;
1687 } else { 1628 } else {
1688 priv->txInProgress = 0; 1629 priv->tx_in_progress = 0;
1689 } 1630 }
1690 } 1631 }
1691 1632
1692 return ack; 1633 return ack;
1693 1634
1694} /* TLan_HandleTxEOC */ 1635}
1695 1636
1696 1637
1697 1638
1698 1639
1699 /*************************************************************** 1640/***************************************************************
1700 * TLan_HandleStatusCheck 1641 * tlan_handle_status_check
1701 * 1642 *
1702 * Returns: 1643 * Returns:
1703 * 0 if Adapter check, 1 if Network Status check. 1644 * 0 if Adapter check, 1 if Network Status check.
1704 * Parms: 1645 * Parms:
1705 * dev Device assigned the IRQ that was 1646 * dev Device assigned the IRQ that was
1706 * raised. 1647 * raised.
1707 * host_int The contents of the HOST_INT 1648 * host_int The contents of the HOST_INT
1708 * port. 1649 * port.
1709 * 1650 *
1710 * This function handles Adapter Check/Network Status 1651 * This function handles Adapter Check/Network Status
1711 * interrupts generated by the adapter. It checks the 1652 * interrupts generated by the adapter. It checks the
1712 * vector in the HOST_INT register to determine if it is 1653 * vector in the HOST_INT register to determine if it is
1713 * an Adapter Check interrupt. If so, it resets the 1654 * an Adapter Check interrupt. If so, it resets the
1714 * adapter. Otherwise it clears the status registers 1655 * adapter. Otherwise it clears the status registers
1715 * and services the PHY. 1656 * and services the PHY.
1716 * 1657 *
1717 **************************************************************/ 1658 **************************************************************/
1718 1659
1719static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) 1660static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
1720{ 1661{
1721 TLanPrivateInfo *priv = netdev_priv(dev); 1662 struct tlan_priv *priv = netdev_priv(dev);
1722 u32 ack; 1663 u32 ack;
1723 u32 error; 1664 u32 error;
1724 u8 net_sts; 1665 u8 net_sts;
@@ -1727,92 +1668,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
1727 u16 tlphy_sts; 1668 u16 tlphy_sts;
1728 1669
1729 ack = 1; 1670 ack = 1;
1730 if ( host_int & TLAN_HI_IV_MASK ) { 1671 if (host_int & TLAN_HI_IV_MASK) {
1731 netif_stop_queue( dev ); 1672 netif_stop_queue(dev);
1732 error = inl( dev->base_addr + TLAN_CH_PARM ); 1673 error = inl(dev->base_addr + TLAN_CH_PARM);
1733 printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error ); 1674 netdev_info(dev, "Adaptor Error = 0x%x\n", error);
1734 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1675 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1735 outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); 1676 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
1736 1677
1737 schedule_work(&priv->tlan_tqueue); 1678 schedule_work(&priv->tlan_tqueue);
1738 1679
1739 netif_wake_queue(dev); 1680 netif_wake_queue(dev);
1740 ack = 0; 1681 ack = 0;
1741 } else { 1682 } else {
1742 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name ); 1683 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
1743 phy = priv->phy[priv->phyNum]; 1684 phy = priv->phy[priv->phy_num];
1744 1685
1745 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); 1686 net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
1746 if ( net_sts ) { 1687 if (net_sts) {
1747 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); 1688 tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
1748 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", 1689 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
1749 dev->name, (unsigned) net_sts ); 1690 dev->name, (unsigned) net_sts);
1750 } 1691 }
1751 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { 1692 if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
1752 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); 1693 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
1753 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 1694 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
1754 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && 1695 if (!(tlphy_sts & TLAN_TS_POLOK) &&
1755 ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1696 !(tlphy_ctl & TLAN_TC_SWAPOL)) {
1756 tlphy_ctl |= TLAN_TC_SWAPOL; 1697 tlphy_ctl |= TLAN_TC_SWAPOL;
1757 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1698 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
1758 } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && 1699 tlphy_ctl);
1759 ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1700 } else if ((tlphy_sts & TLAN_TS_POLOK) &&
1760 tlphy_ctl &= ~TLAN_TC_SWAPOL; 1701 (tlphy_ctl & TLAN_TC_SWAPOL)) {
1761 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1702 tlphy_ctl &= ~TLAN_TC_SWAPOL;
1762 } 1703 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
1763 1704 tlphy_ctl);
1764 if (debug) {
1765 TLan_PhyPrint( dev );
1766 } 1705 }
1706
1707 if (debug)
1708 tlan_phy_print(dev);
1767 } 1709 }
1768 } 1710 }
1769 1711
1770 return ack; 1712 return ack;
1771 1713
1772} /* TLan_HandleStatusCheck */ 1714}
1773 1715
1774 1716
1775 1717
1776 1718
1777 /*************************************************************** 1719/***************************************************************
1778 * TLan_HandleRxEOC 1720 * tlan_handle_rx_eoc
1779 * 1721 *
1780 * Returns: 1722 * Returns:
1781 * 1 1723 * 1
1782 * Parms: 1724 * Parms:
1783 * dev Device assigned the IRQ that was 1725 * dev Device assigned the IRQ that was
1784 * raised. 1726 * raised.
1785 * host_int The contents of the HOST_INT 1727 * host_int The contents of the HOST_INT
1786 * port. 1728 * port.
1787 * 1729 *
1788 * This driver is structured to determine EOC occurrences by 1730 * This driver is structured to determine EOC occurrences by
1789 * reading the CSTAT member of the list structure. Rx EOC 1731 * reading the CSTAT member of the list structure. Rx EOC
1790 * interrupts are disabled via the DIO INTDIS register. 1732 * interrupts are disabled via the DIO INTDIS register.
1791 * However, TLAN chips before revision 3.0 didn't have this 1733 * However, TLAN chips before revision 3.0 didn't have this
1792 * CSTAT member or a INTDIS register, so if this chip is 1734 * CSTAT member or a INTDIS register, so if this chip is
1793 * pre-3.0, process EOC interrupts normally. 1735 * pre-3.0, process EOC interrupts normally.
1794 * 1736 *
1795 **************************************************************/ 1737 **************************************************************/
1796 1738
1797static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) 1739static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
1798{ 1740{
1799 TLanPrivateInfo *priv = netdev_priv(dev); 1741 struct tlan_priv *priv = netdev_priv(dev);
1800 dma_addr_t head_list_phys; 1742 dma_addr_t head_list_phys;
1801 u32 ack = 1; 1743 u32 ack = 1;
1802 1744
1803 if ( priv->tlanRev < 0x30 ) { 1745 if (priv->tlan_rev < 0x30) {
1804 TLAN_DBG( TLAN_DEBUG_RX, 1746 TLAN_DBG(TLAN_DEBUG_RX,
1805 "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", 1747 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
1806 priv->rxHead, priv->rxTail ); 1748 priv->rx_head, priv->rx_tail);
1807 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1749 head_list_phys = priv->rx_list_dma
1808 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1750 + sizeof(struct tlan_list)*priv->rx_head;
1751 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1809 ack |= TLAN_HC_GO | TLAN_HC_RT; 1752 ack |= TLAN_HC_GO | TLAN_HC_RT;
1810 priv->rxEocCount++; 1753 priv->rx_eoc_count++;
1811 } 1754 }
1812 1755
1813 return ack; 1756 return ack;
1814 1757
1815} /* TLan_HandleRxEOC */ 1758}
1816 1759
1817 1760
1818 1761
@@ -1820,98 +1763,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
1820/***************************************************************************** 1763/*****************************************************************************
1821****************************************************************************** 1764******************************************************************************
1822 1765
1823 ThunderLAN Driver Timer Function 1766ThunderLAN driver timer function
1824 1767
1825****************************************************************************** 1768******************************************************************************
1826*****************************************************************************/ 1769*****************************************************************************/
1827 1770
1828 1771
1829 /*************************************************************** 1772/***************************************************************
1830 * TLan_Timer 1773 * tlan_timer
1831 * 1774 *
1832 * Returns: 1775 * Returns:
1833 * Nothing 1776 * Nothing
1834 * Parms: 1777 * Parms:
1835 * data A value given to add timer when 1778 * data A value given to add timer when
1836 * add_timer was called. 1779 * add_timer was called.
1837 * 1780 *
1838 * This function handles timed functionality for the 1781 * This function handles timed functionality for the
1839 * TLAN driver. The two current timer uses are for 1782 * TLAN driver. The two current timer uses are for
1840 * delaying for autonegotionation and driving the ACT LED. 1783 * delaying for autonegotionation and driving the ACT LED.
1841 * - Autonegotiation requires being allowed about 1784 * - Autonegotiation requires being allowed about
1842 * 2 1/2 seconds before attempting to transmit a 1785 * 2 1/2 seconds before attempting to transmit a
1843 * packet. It would be a very bad thing to hang 1786 * packet. It would be a very bad thing to hang
1844 * the kernel this long, so the driver doesn't 1787 * the kernel this long, so the driver doesn't
1845 * allow transmission 'til after this time, for 1788 * allow transmission 'til after this time, for
1846 * certain PHYs. It would be much nicer if all 1789 * certain PHYs. It would be much nicer if all
1847 * PHYs were interrupt-capable like the internal 1790 * PHYs were interrupt-capable like the internal
1848 * PHY. 1791 * PHY.
1849 * - The ACT LED, which shows adapter activity, is 1792 * - The ACT LED, which shows adapter activity, is
1850 * driven by the driver, and so must be left on 1793 * driven by the driver, and so must be left on
1851 * for a short period to power up the LED so it 1794 * for a short period to power up the LED so it
1852 * can be seen. This delay can be changed by 1795 * can be seen. This delay can be changed by
1853 * changing the TLAN_TIMER_ACT_DELAY in tlan.h, 1796 * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
1854 * if desired. 100 ms produces a slightly 1797 * if desired. 100 ms produces a slightly
1855 * sluggish response. 1798 * sluggish response.
1856 * 1799 *
1857 **************************************************************/ 1800 **************************************************************/
1858 1801
1859static void TLan_Timer( unsigned long data ) 1802static void tlan_timer(unsigned long data)
1860{ 1803{
1861 struct net_device *dev = (struct net_device *) data; 1804 struct net_device *dev = (struct net_device *) data;
1862 TLanPrivateInfo *priv = netdev_priv(dev); 1805 struct tlan_priv *priv = netdev_priv(dev);
1863 u32 elapsed; 1806 u32 elapsed;
1864 unsigned long flags = 0; 1807 unsigned long flags = 0;
1865 1808
1866 priv->timer.function = NULL; 1809 priv->timer.function = NULL;
1867 1810
1868 switch ( priv->timerType ) { 1811 switch (priv->timer_type) {
1869#ifdef MONITOR 1812#ifdef MONITOR
1870 case TLAN_TIMER_LINK_BEAT: 1813 case TLAN_TIMER_LINK_BEAT:
1871 TLan_PhyMonitor( dev ); 1814 tlan_phy_monitor(dev);
1872 break; 1815 break;
1873#endif 1816#endif
1874 case TLAN_TIMER_PHY_PDOWN: 1817 case TLAN_TIMER_PHY_PDOWN:
1875 TLan_PhyPowerDown( dev ); 1818 tlan_phy_power_down(dev);
1876 break; 1819 break;
1877 case TLAN_TIMER_PHY_PUP: 1820 case TLAN_TIMER_PHY_PUP:
1878 TLan_PhyPowerUp( dev ); 1821 tlan_phy_power_up(dev);
1879 break; 1822 break;
1880 case TLAN_TIMER_PHY_RESET: 1823 case TLAN_TIMER_PHY_RESET:
1881 TLan_PhyReset( dev ); 1824 tlan_phy_reset(dev);
1882 break; 1825 break;
1883 case TLAN_TIMER_PHY_START_LINK: 1826 case TLAN_TIMER_PHY_START_LINK:
1884 TLan_PhyStartLink( dev ); 1827 tlan_phy_start_link(dev);
1885 break; 1828 break;
1886 case TLAN_TIMER_PHY_FINISH_AN: 1829 case TLAN_TIMER_PHY_FINISH_AN:
1887 TLan_PhyFinishAutoNeg( dev ); 1830 tlan_phy_finish_auto_neg(dev);
1888 break; 1831 break;
1889 case TLAN_TIMER_FINISH_RESET: 1832 case TLAN_TIMER_FINISH_RESET:
1890 TLan_FinishReset( dev ); 1833 tlan_finish_reset(dev);
1891 break; 1834 break;
1892 case TLAN_TIMER_ACTIVITY: 1835 case TLAN_TIMER_ACTIVITY:
1893 spin_lock_irqsave(&priv->lock, flags); 1836 spin_lock_irqsave(&priv->lock, flags);
1894 if ( priv->timer.function == NULL ) { 1837 if (priv->timer.function == NULL) {
1895 elapsed = jiffies - priv->timerSetAt; 1838 elapsed = jiffies - priv->timer_set_at;
1896 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { 1839 if (elapsed >= TLAN_TIMER_ACT_DELAY) {
1897 TLan_DioWrite8( dev->base_addr, 1840 tlan_dio_write8(dev->base_addr,
1898 TLAN_LED_REG, TLAN_LED_LINK ); 1841 TLAN_LED_REG, TLAN_LED_LINK);
1899 } else { 1842 } else {
1900 priv->timer.function = TLan_Timer; 1843 priv->timer.function = tlan_timer;
1901 priv->timer.expires = priv->timerSetAt 1844 priv->timer.expires = priv->timer_set_at
1902 + TLAN_TIMER_ACT_DELAY; 1845 + TLAN_TIMER_ACT_DELAY;
1903 spin_unlock_irqrestore(&priv->lock, flags); 1846 spin_unlock_irqrestore(&priv->lock, flags);
1904 add_timer( &priv->timer ); 1847 add_timer(&priv->timer);
1905 break; 1848 break;
1906 }
1907 } 1849 }
1908 spin_unlock_irqrestore(&priv->lock, flags); 1850 }
1909 break; 1851 spin_unlock_irqrestore(&priv->lock, flags);
1910 default: 1852 break;
1911 break; 1853 default:
1854 break;
1912 } 1855 }
1913 1856
1914} /* TLan_Timer */ 1857}
1915 1858
1916 1859
1917 1860
@@ -1919,39 +1862,39 @@ static void TLan_Timer( unsigned long data )
1919/***************************************************************************** 1862/*****************************************************************************
1920****************************************************************************** 1863******************************************************************************
1921 1864
1922 ThunderLAN Driver Adapter Related Routines 1865ThunderLAN driver adapter related routines
1923 1866
1924****************************************************************************** 1867******************************************************************************
1925*****************************************************************************/ 1868*****************************************************************************/
1926 1869
1927 1870
1928 /*************************************************************** 1871/***************************************************************
1929 * TLan_ResetLists 1872 * tlan_reset_lists
1930 * 1873 *
1931 * Returns: 1874 * Returns:
1932 * Nothing 1875 * Nothing
1933 * Parms: 1876 * Parms:
1934 * dev The device structure with the list 1877 * dev The device structure with the list
1935 * stuctures to be reset. 1878 * stuctures to be reset.
1936 * 1879 *
1937 * This routine sets the variables associated with managing 1880 * This routine sets the variables associated with managing
1938 * the TLAN lists to their initial values. 1881 * the TLAN lists to their initial values.
1939 * 1882 *
1940 **************************************************************/ 1883 **************************************************************/
1941 1884
1942static void TLan_ResetLists( struct net_device *dev ) 1885static void tlan_reset_lists(struct net_device *dev)
1943{ 1886{
1944 TLanPrivateInfo *priv = netdev_priv(dev); 1887 struct tlan_priv *priv = netdev_priv(dev);
1945 int i; 1888 int i;
1946 TLanList *list; 1889 struct tlan_list *list;
1947 dma_addr_t list_phys; 1890 dma_addr_t list_phys;
1948 struct sk_buff *skb; 1891 struct sk_buff *skb;
1949 1892
1950 priv->txHead = 0; 1893 priv->tx_head = 0;
1951 priv->txTail = 0; 1894 priv->tx_tail = 0;
1952 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { 1895 for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
1953 list = priv->txList + i; 1896 list = priv->tx_list + i;
1954 list->cStat = TLAN_CSTAT_UNUSED; 1897 list->c_stat = TLAN_CSTAT_UNUSED;
1955 list->buffer[0].address = 0; 1898 list->buffer[0].address = 0;
1956 list->buffer[2].count = 0; 1899 list->buffer[2].count = 0;
1957 list->buffer[2].address = 0; 1900 list->buffer[2].address = 0;
@@ -1959,169 +1902,169 @@ static void TLan_ResetLists( struct net_device *dev )
1959 list->buffer[9].address = 0; 1902 list->buffer[9].address = 0;
1960 } 1903 }
1961 1904
1962 priv->rxHead = 0; 1905 priv->rx_head = 0;
1963 priv->rxTail = TLAN_NUM_RX_LISTS - 1; 1906 priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
1964 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { 1907 for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
1965 list = priv->rxList + i; 1908 list = priv->rx_list + i;
1966 list_phys = priv->rxListDMA + sizeof(TLanList) * i; 1909 list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
1967 list->cStat = TLAN_CSTAT_READY; 1910 list->c_stat = TLAN_CSTAT_READY;
1968 list->frameSize = TLAN_MAX_FRAME_SIZE; 1911 list->frame_size = TLAN_MAX_FRAME_SIZE;
1969 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 1912 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
1970 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); 1913 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
1971 if ( !skb ) { 1914 if (!skb) {
1972 pr_err("TLAN: out of memory for received data.\n" ); 1915 netdev_err(dev, "Out of memory for received data\n");
1973 break; 1916 break;
1974 } 1917 }
1975 1918
1976 list->buffer[0].address = pci_map_single(priv->pciDev, 1919 list->buffer[0].address = pci_map_single(priv->pci_dev,
1977 skb->data, 1920 skb->data,
1978 TLAN_MAX_FRAME_SIZE, 1921 TLAN_MAX_FRAME_SIZE,
1979 PCI_DMA_FROMDEVICE); 1922 PCI_DMA_FROMDEVICE);
1980 TLan_StoreSKB(list, skb); 1923 tlan_store_skb(list, skb);
1981 list->buffer[1].count = 0; 1924 list->buffer[1].count = 0;
1982 list->buffer[1].address = 0; 1925 list->buffer[1].address = 0;
1983 list->forward = list_phys + sizeof(TLanList); 1926 list->forward = list_phys + sizeof(struct tlan_list);
1984 } 1927 }
1985 1928
1986 /* in case ran out of memory early, clear bits */ 1929 /* in case ran out of memory early, clear bits */
1987 while (i < TLAN_NUM_RX_LISTS) { 1930 while (i < TLAN_NUM_RX_LISTS) {
1988 TLan_StoreSKB(priv->rxList + i, NULL); 1931 tlan_store_skb(priv->rx_list + i, NULL);
1989 ++i; 1932 ++i;
1990 } 1933 }
1991 list->forward = 0; 1934 list->forward = 0;
1992 1935
1993} /* TLan_ResetLists */ 1936}
1994 1937
1995 1938
1996static void TLan_FreeLists( struct net_device *dev ) 1939static void tlan_free_lists(struct net_device *dev)
1997{ 1940{
1998 TLanPrivateInfo *priv = netdev_priv(dev); 1941 struct tlan_priv *priv = netdev_priv(dev);
1999 int i; 1942 int i;
2000 TLanList *list; 1943 struct tlan_list *list;
2001 struct sk_buff *skb; 1944 struct sk_buff *skb;
2002 1945
2003 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { 1946 for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
2004 list = priv->txList + i; 1947 list = priv->tx_list + i;
2005 skb = TLan_GetSKB(list); 1948 skb = tlan_get_skb(list);
2006 if ( skb ) { 1949 if (skb) {
2007 pci_unmap_single( 1950 pci_unmap_single(
2008 priv->pciDev, 1951 priv->pci_dev,
2009 list->buffer[0].address, 1952 list->buffer[0].address,
2010 max(skb->len, 1953 max(skb->len,
2011 (unsigned int)TLAN_MIN_FRAME_SIZE), 1954 (unsigned int)TLAN_MIN_FRAME_SIZE),
2012 PCI_DMA_TODEVICE); 1955 PCI_DMA_TODEVICE);
2013 dev_kfree_skb_any( skb ); 1956 dev_kfree_skb_any(skb);
2014 list->buffer[8].address = 0; 1957 list->buffer[8].address = 0;
2015 list->buffer[9].address = 0; 1958 list->buffer[9].address = 0;
2016 } 1959 }
2017 } 1960 }
2018 1961
2019 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { 1962 for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
2020 list = priv->rxList + i; 1963 list = priv->rx_list + i;
2021 skb = TLan_GetSKB(list); 1964 skb = tlan_get_skb(list);
2022 if ( skb ) { 1965 if (skb) {
2023 pci_unmap_single(priv->pciDev, 1966 pci_unmap_single(priv->pci_dev,
2024 list->buffer[0].address, 1967 list->buffer[0].address,
2025 TLAN_MAX_FRAME_SIZE, 1968 TLAN_MAX_FRAME_SIZE,
2026 PCI_DMA_FROMDEVICE); 1969 PCI_DMA_FROMDEVICE);
2027 dev_kfree_skb_any( skb ); 1970 dev_kfree_skb_any(skb);
2028 list->buffer[8].address = 0; 1971 list->buffer[8].address = 0;
2029 list->buffer[9].address = 0; 1972 list->buffer[9].address = 0;
2030 } 1973 }
2031 } 1974 }
2032} /* TLan_FreeLists */ 1975}
2033 1976
2034 1977
2035 1978
2036 1979
2037 /*************************************************************** 1980/***************************************************************
2038 * TLan_PrintDio 1981 * tlan_print_dio
2039 * 1982 *
2040 * Returns: 1983 * Returns:
2041 * Nothing 1984 * Nothing
2042 * Parms: 1985 * Parms:
2043 * io_base Base IO port of the device of 1986 * io_base Base IO port of the device of
2044 * which to print DIO registers. 1987 * which to print DIO registers.
2045 * 1988 *
2046 * This function prints out all the internal (DIO) 1989 * This function prints out all the internal (DIO)
2047 * registers of a TLAN chip. 1990 * registers of a TLAN chip.
2048 * 1991 *
2049 **************************************************************/ 1992 **************************************************************/
2050 1993
2051static void TLan_PrintDio( u16 io_base ) 1994static void tlan_print_dio(u16 io_base)
2052{ 1995{
2053 u32 data0, data1; 1996 u32 data0, data1;
2054 int i; 1997 int i;
2055 1998
2056 printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", 1999 pr_info("Contents of internal registers for io base 0x%04hx\n",
2057 io_base ); 2000 io_base);
2058 printk( "TLAN: Off. +0 +4\n" ); 2001 pr_info("Off. +0 +4\n");
2059 for ( i = 0; i < 0x4C; i+= 8 ) { 2002 for (i = 0; i < 0x4C; i += 8) {
2060 data0 = TLan_DioRead32( io_base, i ); 2003 data0 = tlan_dio_read32(io_base, i);
2061 data1 = TLan_DioRead32( io_base, i + 0x4 ); 2004 data1 = tlan_dio_read32(io_base, i + 0x4);
2062 printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 ); 2005 pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
2063 } 2006 }
2064 2007
2065} /* TLan_PrintDio */ 2008}
2066 2009
2067 2010
2068 2011
2069 2012
2070 /*************************************************************** 2013/***************************************************************
2071 * TLan_PrintList 2014 * TLan_PrintList
2072 * 2015 *
2073 * Returns: 2016 * Returns:
2074 * Nothing 2017 * Nothing
2075 * Parms: 2018 * Parms:
2076 * list A pointer to the TLanList structure to 2019 * list A pointer to the struct tlan_list structure to
2077 * be printed. 2020 * be printed.
2078 * type A string to designate type of list, 2021 * type A string to designate type of list,
2079 * "Rx" or "Tx". 2022 * "Rx" or "Tx".
2080 * num The index of the list. 2023 * num The index of the list.
2081 * 2024 *
2082 * This function prints out the contents of the list 2025 * This function prints out the contents of the list
2083 * pointed to by the list parameter. 2026 * pointed to by the list parameter.
2084 * 2027 *
2085 **************************************************************/ 2028 **************************************************************/
2086 2029
2087static void TLan_PrintList( TLanList *list, char *type, int num) 2030static void tlan_print_list(struct tlan_list *list, char *type, int num)
2088{ 2031{
2089 int i; 2032 int i;
2090 2033
2091 printk( "TLAN: %s List %d at %p\n", type, num, list ); 2034 pr_info("%s List %d at %p\n", type, num, list);
2092 printk( "TLAN: Forward = 0x%08x\n", list->forward ); 2035 pr_info(" Forward = 0x%08x\n", list->forward);
2093 printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat ); 2036 pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
2094 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize ); 2037 pr_info(" Frame Size = 0x%04hx\n", list->frame_size);
2095 /* for ( i = 0; i < 10; i++ ) { */ 2038 /* for (i = 0; i < 10; i++) { */
2096 for ( i = 0; i < 2; i++ ) { 2039 for (i = 0; i < 2; i++) {
2097 printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", 2040 pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
2098 i, list->buffer[i].count, list->buffer[i].address ); 2041 i, list->buffer[i].count, list->buffer[i].address);
2099 } 2042 }
2100 2043
2101} /* TLan_PrintList */ 2044}
2102 2045
2103 2046
2104 2047
2105 2048
2106 /*************************************************************** 2049/***************************************************************
2107 * TLan_ReadAndClearStats 2050 * tlan_read_and_clear_stats
2108 * 2051 *
2109 * Returns: 2052 * Returns:
2110 * Nothing 2053 * Nothing
2111 * Parms: 2054 * Parms:
2112 * dev Pointer to device structure of adapter 2055 * dev Pointer to device structure of adapter
2113 * to which to read stats. 2056 * to which to read stats.
2114 * record Flag indicating whether to add 2057 * record Flag indicating whether to add
2115 * 2058 *
2116 * This functions reads all the internal status registers 2059 * This functions reads all the internal status registers
2117 * of the TLAN chip, which clears them as a side effect. 2060 * of the TLAN chip, which clears them as a side effect.
2118 * It then either adds the values to the device's status 2061 * It then either adds the values to the device's status
2119 * struct, or discards them, depending on whether record 2062 * struct, or discards them, depending on whether record
2120 * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). 2063 * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
2121 * 2064 *
2122 **************************************************************/ 2065 **************************************************************/
2123 2066
2124static void TLan_ReadAndClearStats( struct net_device *dev, int record ) 2067static void tlan_read_and_clear_stats(struct net_device *dev, int record)
2125{ 2068{
2126 u32 tx_good, tx_under; 2069 u32 tx_good, tx_under;
2127 u32 rx_good, rx_over; 2070 u32 rx_good, rx_over;
@@ -2129,41 +2072,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2129 u32 multi_col, single_col; 2072 u32 multi_col, single_col;
2130 u32 excess_col, late_col, loss; 2073 u32 excess_col, late_col, loss;
2131 2074
2132 outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2075 outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
2133 tx_good = inb( dev->base_addr + TLAN_DIO_DATA ); 2076 tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
2134 tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2077 tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2135 tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; 2078 tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
2136 tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2079 tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2137 2080
2138 outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2081 outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
2139 rx_good = inb( dev->base_addr + TLAN_DIO_DATA ); 2082 rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
2140 rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2083 rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2141 rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; 2084 rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
2142 rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2085 rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2143 2086
2144 outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR ); 2087 outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
2145 def_tx = inb( dev->base_addr + TLAN_DIO_DATA ); 2088 def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
2146 def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2089 def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2147 crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2090 crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2148 code = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2091 code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2149 2092
2150 outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2093 outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
2151 multi_col = inb( dev->base_addr + TLAN_DIO_DATA ); 2094 multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
2152 multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2095 multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2153 single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2096 single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2154 single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8; 2097 single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
2155 2098
2156 outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2099 outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
2157 excess_col = inb( dev->base_addr + TLAN_DIO_DATA ); 2100 excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
2158 late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 ); 2101 late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
2159 loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2102 loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2160 2103
2161 if ( record ) { 2104 if (record) {
2162 dev->stats.rx_packets += rx_good; 2105 dev->stats.rx_packets += rx_good;
2163 dev->stats.rx_errors += rx_over + crc + code; 2106 dev->stats.rx_errors += rx_over + crc + code;
2164 dev->stats.tx_packets += tx_good; 2107 dev->stats.tx_packets += tx_good;
2165 dev->stats.tx_errors += tx_under + loss; 2108 dev->stats.tx_errors += tx_under + loss;
2166 dev->stats.collisions += multi_col + single_col + excess_col + late_col; 2109 dev->stats.collisions += multi_col
2110 + single_col + excess_col + late_col;
2167 2111
2168 dev->stats.rx_over_errors += rx_over; 2112 dev->stats.rx_over_errors += rx_over;
2169 dev->stats.rx_crc_errors += crc; 2113 dev->stats.rx_crc_errors += crc;
@@ -2173,39 +2117,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2173 dev->stats.tx_carrier_errors += loss; 2117 dev->stats.tx_carrier_errors += loss;
2174 } 2118 }
2175 2119
2176} /* TLan_ReadAndClearStats */ 2120}
2177 2121
2178 2122
2179 2123
2180 2124
2181 /*************************************************************** 2125/***************************************************************
2182 * TLan_Reset 2126 * TLan_Reset
2183 * 2127 *
2184 * Returns: 2128 * Returns:
2185 * 0 2129 * 0
2186 * Parms: 2130 * Parms:
2187 * dev Pointer to device structure of adapter 2131 * dev Pointer to device structure of adapter
2188 * to be reset. 2132 * to be reset.
2189 * 2133 *
2190 * This function resets the adapter and it's physical 2134 * This function resets the adapter and it's physical
2191 * device. See Chap. 3, pp. 9-10 of the "ThunderLAN 2135 * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
2192 * Programmer's Guide" for details. The routine tries to 2136 * Programmer's Guide" for details. The routine tries to
2193 * implement what is detailed there, though adjustments 2137 * implement what is detailed there, though adjustments
2194 * have been made. 2138 * have been made.
2195 * 2139 *
2196 **************************************************************/ 2140 **************************************************************/
2197 2141
2198static void 2142static void
2199TLan_ResetAdapter( struct net_device *dev ) 2143tlan_reset_adapter(struct net_device *dev)
2200{ 2144{
2201 TLanPrivateInfo *priv = netdev_priv(dev); 2145 struct tlan_priv *priv = netdev_priv(dev);
2202 int i; 2146 int i;
2203 u32 addr; 2147 u32 addr;
2204 u32 data; 2148 u32 data;
2205 u8 data8; 2149 u8 data8;
2206 2150
2207 priv->tlanFullDuplex = false; 2151 priv->tlan_full_duplex = false;
2208 priv->phyOnline=0; 2152 priv->phy_online = 0;
2209 netif_carrier_off(dev); 2153 netif_carrier_off(dev);
2210 2154
2211/* 1. Assert reset bit. */ 2155/* 1. Assert reset bit. */
@@ -2216,7 +2160,7 @@ TLan_ResetAdapter( struct net_device *dev )
2216 2160
2217 udelay(1000); 2161 udelay(1000);
2218 2162
2219/* 2. Turn off interrupts. ( Probably isn't necessary ) */ 2163/* 2. Turn off interrupts. (Probably isn't necessary) */
2220 2164
2221 data = inl(dev->base_addr + TLAN_HOST_CMD); 2165 data = inl(dev->base_addr + TLAN_HOST_CMD);
2222 data |= TLAN_HC_INT_OFF; 2166 data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2168,204 @@ TLan_ResetAdapter( struct net_device *dev )
2224 2168
2225/* 3. Clear AREGs and HASHs. */ 2169/* 3. Clear AREGs and HASHs. */
2226 2170
2227 for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) { 2171 for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
2228 TLan_DioWrite32( dev->base_addr, (u16) i, 0 ); 2172 tlan_dio_write32(dev->base_addr, (u16) i, 0);
2229 }
2230 2173
2231/* 4. Setup NetConfig register. */ 2174/* 4. Setup NetConfig register. */
2232 2175
2233 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2176 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
2234 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data ); 2177 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
2235 2178
2236/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */ 2179/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
2237 2180
2238 outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD ); 2181 outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
2239 outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD ); 2182 outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
2240 2183
2241/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */ 2184/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
2242 2185
2243 outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR ); 2186 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
2244 addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 2187 addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
2245 TLan_SetBit( TLAN_NET_SIO_NMRST, addr ); 2188 tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
2246 2189
2247/* 7. Setup the remaining registers. */ 2190/* 7. Setup the remaining registers. */
2248 2191
2249 if ( priv->tlanRev >= 0x30 ) { 2192 if (priv->tlan_rev >= 0x30) {
2250 data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC; 2193 data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
2251 TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 ); 2194 tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
2252 } 2195 }
2253 TLan_PhyDetect( dev ); 2196 tlan_phy_detect(dev);
2254 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN; 2197 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
2255 2198
2256 if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) { 2199 if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
2257 data |= TLAN_NET_CFG_BIT; 2200 data |= TLAN_NET_CFG_BIT;
2258 if ( priv->aui == 1 ) { 2201 if (priv->aui == 1) {
2259 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a ); 2202 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
2260 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2203 } else if (priv->duplex == TLAN_DUPLEX_FULL) {
2261 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 ); 2204 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
2262 priv->tlanFullDuplex = true; 2205 priv->tlan_full_duplex = true;
2263 } else { 2206 } else {
2264 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 ); 2207 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
2265 } 2208 }
2266 } 2209 }
2267 2210
2268 if ( priv->phyNum == 0 ) { 2211 if (priv->phy_num == 0)
2269 data |= TLAN_NET_CFG_PHY_EN; 2212 data |= TLAN_NET_CFG_PHY_EN;
2270 } 2213 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
2271 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
2272 2214
2273 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2215 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
2274 TLan_FinishReset( dev ); 2216 tlan_finish_reset(dev);
2275 } else { 2217 else
2276 TLan_PhyPowerDown( dev ); 2218 tlan_phy_power_down(dev);
2277 }
2278 2219
2279} /* TLan_ResetAdapter */ 2220}
2280 2221
2281 2222
2282 2223
2283 2224
2284static void 2225static void
2285TLan_FinishReset( struct net_device *dev ) 2226tlan_finish_reset(struct net_device *dev)
2286{ 2227{
2287 TLanPrivateInfo *priv = netdev_priv(dev); 2228 struct tlan_priv *priv = netdev_priv(dev);
2288 u8 data; 2229 u8 data;
2289 u32 phy; 2230 u32 phy;
2290 u8 sio; 2231 u8 sio;
2291 u16 status; 2232 u16 status;
2292 u16 partner; 2233 u16 partner;
2293 u16 tlphy_ctl; 2234 u16 tlphy_ctl;
2294 u16 tlphy_par; 2235 u16 tlphy_par;
2295 u16 tlphy_id1, tlphy_id2; 2236 u16 tlphy_id1, tlphy_id2;
2296 int i; 2237 int i;
2297 2238
2298 phy = priv->phy[priv->phyNum]; 2239 phy = priv->phy[priv->phy_num];
2299 2240
2300 data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP; 2241 data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
2301 if ( priv->tlanFullDuplex ) { 2242 if (priv->tlan_full_duplex)
2302 data |= TLAN_NET_CMD_DUPLEX; 2243 data |= TLAN_NET_CMD_DUPLEX;
2303 } 2244 tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
2304 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
2305 data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5; 2245 data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
2306 if ( priv->phyNum == 0 ) { 2246 if (priv->phy_num == 0)
2307 data |= TLAN_NET_MASK_MASK7; 2247 data |= TLAN_NET_MASK_MASK7;
2308 } 2248 tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
2309 TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data ); 2249 tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
2310 TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 ); 2250 tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
2311 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 ); 2251 tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
2312 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
2313 2252
2314 if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || 2253 if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
2315 ( priv->aui ) ) { 2254 (priv->aui)) {
2316 status = MII_GS_LINK; 2255 status = MII_GS_LINK;
2317 printk( "TLAN: %s: Link forced.\n", dev->name ); 2256 netdev_info(dev, "Link forced\n");
2318 } else { 2257 } else {
2319 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2258 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2320 udelay( 1000 ); 2259 udelay(1000);
2321 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2260 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2322 if ( (status & MII_GS_LINK) && 2261 if ((status & MII_GS_LINK) &&
2323 /* We only support link info on Nat.Sem. PHY's */ 2262 /* We only support link info on Nat.Sem. PHY's */
2324 (tlphy_id1 == NAT_SEM_ID1) && 2263 (tlphy_id1 == NAT_SEM_ID1) &&
2325 (tlphy_id2 == NAT_SEM_ID2) ) { 2264 (tlphy_id2 == NAT_SEM_ID2)) {
2326 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner ); 2265 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
2327 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par ); 2266 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
2328 2267
2329 printk( "TLAN: %s: Link active with ", dev->name ); 2268 netdev_info(dev,
2330 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { 2269 "Link active with %s %uMbps %s-Duplex\n",
2331 printk( "forced 10%sMbps %s-Duplex\n", 2270 !(tlphy_par & TLAN_PHY_AN_EN_STAT)
2332 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2271 ? "forced" : "Autonegotiation enabled,",
2333 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2272 tlphy_par & TLAN_PHY_SPEED_100
2334 } else { 2273 ? 100 : 10,
2335 printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n", 2274 tlphy_par & TLAN_PHY_DUPLEX_FULL
2336 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2275 ? "Full" : "Half");
2337 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2276
2338 printk("TLAN: Partner capability: "); 2277 if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
2339 for (i = 5; i <= 10; i++) 2278 netdev_info(dev, "Partner capability:");
2340 if (partner & (1<<i)) 2279 for (i = 5; i < 10; i++)
2341 printk("%s",media[i-5]); 2280 if (partner & (1 << i))
2342 printk("\n"); 2281 pr_cont(" %s", media[i-5]);
2282 pr_cont("\n");
2343 } 2283 }
2344 2284
2345 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 2285 tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
2286 TLAN_LED_LINK);
2346#ifdef MONITOR 2287#ifdef MONITOR
2347 /* We have link beat..for now anyway */ 2288 /* We have link beat..for now anyway */
2348 priv->link = 1; 2289 priv->link = 1;
2349 /*Enabling link beat monitoring */ 2290 /*Enabling link beat monitoring */
2350 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT ); 2291 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
2351#endif 2292#endif
2352 } else if (status & MII_GS_LINK) { 2293 } else if (status & MII_GS_LINK) {
2353 printk( "TLAN: %s: Link active\n", dev->name ); 2294 netdev_info(dev, "Link active\n");
2354 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 2295 tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
2296 TLAN_LED_LINK);
2355 } 2297 }
2356 } 2298 }
2357 2299
2358 if ( priv->phyNum == 0 ) { 2300 if (priv->phy_num == 0) {
2359 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 2301 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
2360 tlphy_ctl |= TLAN_TC_INTEN; 2302 tlphy_ctl |= TLAN_TC_INTEN;
2361 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl ); 2303 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
2362 sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO ); 2304 sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
2363 sio |= TLAN_NET_SIO_MINTEN; 2305 sio |= TLAN_NET_SIO_MINTEN;
2364 TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio ); 2306 tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
2365 } 2307 }
2366 2308
2367 if ( status & MII_GS_LINK ) { 2309 if (status & MII_GS_LINK) {
2368 TLan_SetMac( dev, 0, dev->dev_addr ); 2310 tlan_set_mac(dev, 0, dev->dev_addr);
2369 priv->phyOnline = 1; 2311 priv->phy_online = 1;
2370 outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); 2312 outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
2371 if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) { 2313 if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
2372 outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); 2314 outb((TLAN_HC_REQ_INT >> 8),
2373 } 2315 dev->base_addr + TLAN_HOST_CMD + 1);
2374 outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM ); 2316 outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
2375 outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); 2317 outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
2376 netif_carrier_on(dev); 2318 netif_carrier_on(dev);
2377 } else { 2319 } else {
2378 printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", 2320 netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
2379 dev->name ); 2321 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
2380 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
2381 return; 2322 return;
2382 } 2323 }
2383 TLan_SetMulticastList(dev); 2324 tlan_set_multicast_list(dev);
2384 2325
2385} /* TLan_FinishReset */ 2326}
2386 2327
2387 2328
2388 2329
2389 2330
2390 /*************************************************************** 2331/***************************************************************
2391 * TLan_SetMac 2332 * tlan_set_mac
2392 * 2333 *
2393 * Returns: 2334 * Returns:
2394 * Nothing 2335 * Nothing
2395 * Parms: 2336 * Parms:
2396 * dev Pointer to device structure of adapter 2337 * dev Pointer to device structure of adapter
2397 * on which to change the AREG. 2338 * on which to change the AREG.
2398 * areg The AREG to set the address in (0 - 3). 2339 * areg The AREG to set the address in (0 - 3).
2399 * mac A pointer to an array of chars. Each 2340 * mac A pointer to an array of chars. Each
2400 * element stores one byte of the address. 2341 * element stores one byte of the address.
2401 * IE, it isn't in ascii. 2342 * IE, it isn't in ascii.
2402 * 2343 *
2403 * This function transfers a MAC address to one of the 2344 * This function transfers a MAC address to one of the
2404 * TLAN AREGs (address registers). The TLAN chip locks 2345 * TLAN AREGs (address registers). The TLAN chip locks
2405 * the register on writing to offset 0 and unlocks the 2346 * the register on writing to offset 0 and unlocks the
2406 * register after writing to offset 5. If NULL is passed 2347 * register after writing to offset 5. If NULL is passed
2407 * in mac, then the AREG is filled with 0's. 2348 * in mac, then the AREG is filled with 0's.
2408 * 2349 *
2409 **************************************************************/ 2350 **************************************************************/
2410 2351
2411static void TLan_SetMac( struct net_device *dev, int areg, char *mac ) 2352static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
2412{ 2353{
2413 int i; 2354 int i;
2414 2355
2415 areg *= 6; 2356 areg *= 6;
2416 2357
2417 if ( mac != NULL ) { 2358 if (mac != NULL) {
2418 for ( i = 0; i < 6; i++ ) 2359 for (i = 0; i < 6; i++)
2419 TLan_DioWrite8( dev->base_addr, 2360 tlan_dio_write8(dev->base_addr,
2420 TLAN_AREG_0 + areg + i, mac[i] ); 2361 TLAN_AREG_0 + areg + i, mac[i]);
2421 } else { 2362 } else {
2422 for ( i = 0; i < 6; i++ ) 2363 for (i = 0; i < 6; i++)
2423 TLan_DioWrite8( dev->base_addr, 2364 tlan_dio_write8(dev->base_addr,
2424 TLAN_AREG_0 + areg + i, 0 ); 2365 TLAN_AREG_0 + areg + i, 0);
2425 } 2366 }
2426 2367
2427} /* TLan_SetMac */ 2368}
2428 2369
2429 2370
2430 2371
@@ -2432,205 +2373,199 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
2432/***************************************************************************** 2373/*****************************************************************************
2433****************************************************************************** 2374******************************************************************************
2434 2375
2435 ThunderLAN Driver PHY Layer Routines 2376ThunderLAN driver PHY layer routines
2436 2377
2437****************************************************************************** 2378******************************************************************************
2438*****************************************************************************/ 2379*****************************************************************************/
2439 2380
2440 2381
2441 2382
2442 /********************************************************************* 2383/*********************************************************************
2443 * TLan_PhyPrint 2384 * tlan_phy_print
2444 * 2385 *
2445 * Returns: 2386 * Returns:
2446 * Nothing 2387 * Nothing
2447 * Parms: 2388 * Parms:
2448 * dev A pointer to the device structure of the 2389 * dev A pointer to the device structure of the
2449 * TLAN device having the PHYs to be detailed. 2390 * TLAN device having the PHYs to be detailed.
2450 * 2391 *
2451 * This function prints the registers a PHY (aka transceiver). 2392 * This function prints the registers a PHY (aka transceiver).
2452 * 2393 *
2453 ********************************************************************/ 2394 ********************************************************************/
2454 2395
2455static void TLan_PhyPrint( struct net_device *dev ) 2396static void tlan_phy_print(struct net_device *dev)
2456{ 2397{
2457 TLanPrivateInfo *priv = netdev_priv(dev); 2398 struct tlan_priv *priv = netdev_priv(dev);
2458 u16 i, data0, data1, data2, data3, phy; 2399 u16 i, data0, data1, data2, data3, phy;
2459 2400
2460 phy = priv->phy[priv->phyNum]; 2401 phy = priv->phy[priv->phy_num];
2461 2402
2462 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2403 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
2463 printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name ); 2404 netdev_info(dev, "Unmanaged PHY\n");
2464 } else if ( phy <= TLAN_PHY_MAX_ADDR ) { 2405 } else if (phy <= TLAN_PHY_MAX_ADDR) {
2465 printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy ); 2406 netdev_info(dev, "PHY 0x%02x\n", phy);
2466 printk( "TLAN: Off. +0 +1 +2 +3\n" ); 2407 pr_info(" Off. +0 +1 +2 +3\n");
2467 for ( i = 0; i < 0x20; i+= 4 ) { 2408 for (i = 0; i < 0x20; i += 4) {
2468 printk( "TLAN: 0x%02x", i ); 2409 tlan_mii_read_reg(dev, phy, i, &data0);
2469 TLan_MiiReadReg( dev, phy, i, &data0 ); 2410 tlan_mii_read_reg(dev, phy, i + 1, &data1);
2470 printk( " 0x%04hx", data0 ); 2411 tlan_mii_read_reg(dev, phy, i + 2, &data2);
2471 TLan_MiiReadReg( dev, phy, i + 1, &data1 ); 2412 tlan_mii_read_reg(dev, phy, i + 3, &data3);
2472 printk( " 0x%04hx", data1 ); 2413 pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
2473 TLan_MiiReadReg( dev, phy, i + 2, &data2 ); 2414 i, data0, data1, data2, data3);
2474 printk( " 0x%04hx", data2 );
2475 TLan_MiiReadReg( dev, phy, i + 3, &data3 );
2476 printk( " 0x%04hx\n", data3 );
2477 } 2415 }
2478 } else { 2416 } else {
2479 printk( "TLAN: Device %s, Invalid PHY.\n", dev->name ); 2417 netdev_info(dev, "Invalid PHY\n");
2480 } 2418 }
2481 2419
2482} /* TLan_PhyPrint */ 2420}
2483 2421
2484 2422
2485 2423
2486 2424
2487 /********************************************************************* 2425/*********************************************************************
2488 * TLan_PhyDetect 2426 * tlan_phy_detect
2489 * 2427 *
2490 * Returns: 2428 * Returns:
2491 * Nothing 2429 * Nothing
2492 * Parms: 2430 * Parms:
2493 * dev A pointer to the device structure of the adapter 2431 * dev A pointer to the device structure of the adapter
2494 * for which the PHY needs determined. 2432 * for which the PHY needs determined.
2495 * 2433 *
2496 * So far I've found that adapters which have external PHYs 2434 * So far I've found that adapters which have external PHYs
2497 * may also use the internal PHY for part of the functionality. 2435 * may also use the internal PHY for part of the functionality.
2498 * (eg, AUI/Thinnet). This function finds out if this TLAN 2436 * (eg, AUI/Thinnet). This function finds out if this TLAN
2499 * chip has an internal PHY, and then finds the first external 2437 * chip has an internal PHY, and then finds the first external
2500 * PHY (starting from address 0) if it exists). 2438 * PHY (starting from address 0) if it exists).
2501 * 2439 *
2502 ********************************************************************/ 2440 ********************************************************************/
2503 2441
2504static void TLan_PhyDetect( struct net_device *dev ) 2442static void tlan_phy_detect(struct net_device *dev)
2505{ 2443{
2506 TLanPrivateInfo *priv = netdev_priv(dev); 2444 struct tlan_priv *priv = netdev_priv(dev);
2507 u16 control; 2445 u16 control;
2508 u16 hi; 2446 u16 hi;
2509 u16 lo; 2447 u16 lo;
2510 u32 phy; 2448 u32 phy;
2511 2449
2512 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2450 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
2513 priv->phyNum = 0xFFFF; 2451 priv->phy_num = 0xffff;
2514 return; 2452 return;
2515 } 2453 }
2516 2454
2517 TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi ); 2455 tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
2518 2456
2519 if ( hi != 0xFFFF ) { 2457 if (hi != 0xffff)
2520 priv->phy[0] = TLAN_PHY_MAX_ADDR; 2458 priv->phy[0] = TLAN_PHY_MAX_ADDR;
2521 } else { 2459 else
2522 priv->phy[0] = TLAN_PHY_NONE; 2460 priv->phy[0] = TLAN_PHY_NONE;
2523 }
2524 2461
2525 priv->phy[1] = TLAN_PHY_NONE; 2462 priv->phy[1] = TLAN_PHY_NONE;
2526 for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) { 2463 for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
2527 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control ); 2464 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
2528 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi ); 2465 tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
2529 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo ); 2466 tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
2530 if ( ( control != 0xFFFF ) || 2467 if ((control != 0xffff) ||
2531 ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { 2468 (hi != 0xffff) || (lo != 0xffff)) {
2532 TLAN_DBG( TLAN_DEBUG_GNRL, 2469 TLAN_DBG(TLAN_DEBUG_GNRL,
2533 "PHY found at %02x %04x %04x %04x\n", 2470 "PHY found at %02x %04x %04x %04x\n",
2534 phy, control, hi, lo ); 2471 phy, control, hi, lo);
2535 if ( ( priv->phy[1] == TLAN_PHY_NONE ) && 2472 if ((priv->phy[1] == TLAN_PHY_NONE) &&
2536 ( phy != TLAN_PHY_MAX_ADDR ) ) { 2473 (phy != TLAN_PHY_MAX_ADDR)) {
2537 priv->phy[1] = phy; 2474 priv->phy[1] = phy;
2538 } 2475 }
2539 } 2476 }
2540 } 2477 }
2541 2478
2542 if ( priv->phy[1] != TLAN_PHY_NONE ) { 2479 if (priv->phy[1] != TLAN_PHY_NONE)
2543 priv->phyNum = 1; 2480 priv->phy_num = 1;
2544 } else if ( priv->phy[0] != TLAN_PHY_NONE ) { 2481 else if (priv->phy[0] != TLAN_PHY_NONE)
2545 priv->phyNum = 0; 2482 priv->phy_num = 0;
2546 } else { 2483 else
2547 printk( "TLAN: Cannot initialize device, no PHY was found!\n" ); 2484 netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
2548 }
2549 2485
2550} /* TLan_PhyDetect */ 2486}
2551 2487
2552 2488
2553 2489
2554 2490
2555static void TLan_PhyPowerDown( struct net_device *dev ) 2491static void tlan_phy_power_down(struct net_device *dev)
2556{ 2492{
2557 TLanPrivateInfo *priv = netdev_priv(dev); 2493 struct tlan_priv *priv = netdev_priv(dev);
2558 u16 value; 2494 u16 value;
2559 2495
2560 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name ); 2496 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
2561 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; 2497 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
2562 TLan_MiiSync( dev->base_addr ); 2498 tlan_mii_sync(dev->base_addr);
2563 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2499 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2564 if ( ( priv->phyNum == 0 ) && 2500 if ((priv->phy_num == 0) &&
2565 ( priv->phy[1] != TLAN_PHY_NONE ) && 2501 (priv->phy[1] != TLAN_PHY_NONE) &&
2566 ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { 2502 (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
2567 TLan_MiiSync( dev->base_addr ); 2503 tlan_mii_sync(dev->base_addr);
2568 TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value ); 2504 tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
2569 } 2505 }
2570 2506
2571 /* Wait for 50 ms and powerup 2507 /* Wait for 50 ms and powerup
2572 * This is abitrary. It is intended to make sure the 2508 * This is abitrary. It is intended to make sure the
2573 * transceiver settles. 2509 * transceiver settles.
2574 */ 2510 */
2575 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP ); 2511 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
2576 2512
2577} /* TLan_PhyPowerDown */ 2513}
2578 2514
2579 2515
2580 2516
2581 2517
2582static void TLan_PhyPowerUp( struct net_device *dev ) 2518static void tlan_phy_power_up(struct net_device *dev)
2583{ 2519{
2584 TLanPrivateInfo *priv = netdev_priv(dev); 2520 struct tlan_priv *priv = netdev_priv(dev);
2585 u16 value; 2521 u16 value;
2586 2522
2587 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name ); 2523 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
2588 TLan_MiiSync( dev->base_addr ); 2524 tlan_mii_sync(dev->base_addr);
2589 value = MII_GC_LOOPBK; 2525 value = MII_GC_LOOPBK;
2590 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2526 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2591 TLan_MiiSync(dev->base_addr); 2527 tlan_mii_sync(dev->base_addr);
2592 /* Wait for 500 ms and reset the 2528 /* Wait for 500 ms and reset the
2593 * transceiver. The TLAN docs say both 50 ms and 2529 * transceiver. The TLAN docs say both 50 ms and
2594 * 500 ms, so do the longer, just in case. 2530 * 500 ms, so do the longer, just in case.
2595 */ 2531 */
2596 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET ); 2532 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
2597 2533
2598} /* TLan_PhyPowerUp */ 2534}
2599 2535
2600 2536
2601 2537
2602 2538
2603static void TLan_PhyReset( struct net_device *dev ) 2539static void tlan_phy_reset(struct net_device *dev)
2604{ 2540{
2605 TLanPrivateInfo *priv = netdev_priv(dev); 2541 struct tlan_priv *priv = netdev_priv(dev);
2606 u16 phy; 2542 u16 phy;
2607 u16 value; 2543 u16 value;
2608 2544
2609 phy = priv->phy[priv->phyNum]; 2545 phy = priv->phy[priv->phy_num];
2610 2546
2611 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name ); 2547 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name);
2612 TLan_MiiSync( dev->base_addr ); 2548 tlan_mii_sync(dev->base_addr);
2613 value = MII_GC_LOOPBK | MII_GC_RESET; 2549 value = MII_GC_LOOPBK | MII_GC_RESET;
2614 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value ); 2550 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
2615 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); 2551 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2616 while ( value & MII_GC_RESET ) { 2552 while (value & MII_GC_RESET)
2617 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); 2553 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2618 }
2619 2554
2620 /* Wait for 500 ms and initialize. 2555 /* Wait for 500 ms and initialize.
2621 * I don't remember why I wait this long. 2556 * I don't remember why I wait this long.
2622 * I've changed this to 50ms, as it seems long enough. 2557 * I've changed this to 50ms, as it seems long enough.
2623 */ 2558 */
2624 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK ); 2559 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
2625 2560
2626} /* TLan_PhyReset */ 2561}
2627 2562
2628 2563
2629 2564
2630 2565
2631static void TLan_PhyStartLink( struct net_device *dev ) 2566static void tlan_phy_start_link(struct net_device *dev)
2632{ 2567{
2633 TLanPrivateInfo *priv = netdev_priv(dev); 2568 struct tlan_priv *priv = netdev_priv(dev);
2634 u16 ability; 2569 u16 ability;
2635 u16 control; 2570 u16 control;
2636 u16 data; 2571 u16 data;
@@ -2638,86 +2573,87 @@ static void TLan_PhyStartLink( struct net_device *dev )
2638 u16 status; 2573 u16 status;
2639 u16 tctl; 2574 u16 tctl;
2640 2575
2641 phy = priv->phy[priv->phyNum]; 2576 phy = priv->phy[priv->phy_num];
2642 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name ); 2577 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
2643 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2578 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2644 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability ); 2579 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
2645 2580
2646 if ( ( status & MII_GS_AUTONEG ) && 2581 if ((status & MII_GS_AUTONEG) &&
2647 ( ! priv->aui ) ) { 2582 (!priv->aui)) {
2648 ability = status >> 11; 2583 ability = status >> 11;
2649 if ( priv->speed == TLAN_SPEED_10 && 2584 if (priv->speed == TLAN_SPEED_10 &&
2650 priv->duplex == TLAN_DUPLEX_HALF) { 2585 priv->duplex == TLAN_DUPLEX_HALF) {
2651 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000); 2586 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
2652 } else if ( priv->speed == TLAN_SPEED_10 && 2587 } else if (priv->speed == TLAN_SPEED_10 &&
2653 priv->duplex == TLAN_DUPLEX_FULL) { 2588 priv->duplex == TLAN_DUPLEX_FULL) {
2654 priv->tlanFullDuplex = true; 2589 priv->tlan_full_duplex = true;
2655 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100); 2590 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
2656 } else if ( priv->speed == TLAN_SPEED_100 && 2591 } else if (priv->speed == TLAN_SPEED_100 &&
2657 priv->duplex == TLAN_DUPLEX_HALF) { 2592 priv->duplex == TLAN_DUPLEX_HALF) {
2658 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000); 2593 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
2659 } else if ( priv->speed == TLAN_SPEED_100 && 2594 } else if (priv->speed == TLAN_SPEED_100 &&
2660 priv->duplex == TLAN_DUPLEX_FULL) { 2595 priv->duplex == TLAN_DUPLEX_FULL) {
2661 priv->tlanFullDuplex = true; 2596 priv->tlan_full_duplex = true;
2662 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100); 2597 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
2663 } else { 2598 } else {
2664 2599
2665 /* Set Auto-Neg advertisement */ 2600 /* Set Auto-Neg advertisement */
2666 TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1); 2601 tlan_mii_write_reg(dev, phy, MII_AN_ADV,
2602 (ability << 5) | 1);
2667 /* Enablee Auto-Neg */ 2603 /* Enablee Auto-Neg */
2668 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 ); 2604 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
2669 /* Restart Auto-Neg */ 2605 /* Restart Auto-Neg */
2670 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 ); 2606 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
2671 /* Wait for 4 sec for autonegotiation 2607 /* Wait for 4 sec for autonegotiation
2672 * to complete. The max spec time is less than this 2608 * to complete. The max spec time is less than this
2673 * but the card need additional time to start AN. 2609 * but the card need additional time to start AN.
2674 * .5 sec should be plenty extra. 2610 * .5 sec should be plenty extra.
2675 */ 2611 */
2676 printk( "TLAN: %s: Starting autonegotiation.\n", dev->name ); 2612 netdev_info(dev, "Starting autonegotiation\n");
2677 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2613 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
2678 return; 2614 return;
2679 } 2615 }
2680 2616
2681 } 2617 }
2682 2618
2683 if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) { 2619 if ((priv->aui) && (priv->phy_num != 0)) {
2684 priv->phyNum = 0; 2620 priv->phy_num = 0;
2685 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2621 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
2686 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); 2622 | TLAN_NET_CFG_PHY_EN;
2687 TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN ); 2623 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
2624 tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
2688 return; 2625 return;
2689 } else if ( priv->phyNum == 0 ) { 2626 } else if (priv->phy_num == 0) {
2690 control = 0; 2627 control = 0;
2691 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl ); 2628 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
2692 if ( priv->aui ) { 2629 if (priv->aui) {
2693 tctl |= TLAN_TC_AUISEL; 2630 tctl |= TLAN_TC_AUISEL;
2694 } else { 2631 } else {
2695 tctl &= ~TLAN_TC_AUISEL; 2632 tctl &= ~TLAN_TC_AUISEL;
2696 if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2633 if (priv->duplex == TLAN_DUPLEX_FULL) {
2697 control |= MII_GC_DUPLEX; 2634 control |= MII_GC_DUPLEX;
2698 priv->tlanFullDuplex = true; 2635 priv->tlan_full_duplex = true;
2699 } 2636 }
2700 if ( priv->speed == TLAN_SPEED_100 ) { 2637 if (priv->speed == TLAN_SPEED_100)
2701 control |= MII_GC_SPEEDSEL; 2638 control |= MII_GC_SPEEDSEL;
2702 }
2703 } 2639 }
2704 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control ); 2640 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
2705 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl ); 2641 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
2706 } 2642 }
2707 2643
2708 /* Wait for 2 sec to give the transceiver time 2644 /* Wait for 2 sec to give the transceiver time
2709 * to establish link. 2645 * to establish link.
2710 */ 2646 */
2711 TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET ); 2647 tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
2712 2648
2713} /* TLan_PhyStartLink */ 2649}
2714 2650
2715 2651
2716 2652
2717 2653
2718static void TLan_PhyFinishAutoNeg( struct net_device *dev ) 2654static void tlan_phy_finish_auto_neg(struct net_device *dev)
2719{ 2655{
2720 TLanPrivateInfo *priv = netdev_priv(dev); 2656 struct tlan_priv *priv = netdev_priv(dev);
2721 u16 an_adv; 2657 u16 an_adv;
2722 u16 an_lpa; 2658 u16 an_lpa;
2723 u16 data; 2659 u16 data;
@@ -2725,115 +2661,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2725 u16 phy; 2661 u16 phy;
2726 u16 status; 2662 u16 status;
2727 2663
2728 phy = priv->phy[priv->phyNum]; 2664 phy = priv->phy[priv->phy_num];
2729 2665
2730 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2666 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2731 udelay( 1000 ); 2667 udelay(1000);
2732 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2668 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2733 2669
2734 if ( ! ( status & MII_GS_AUTOCMPLT ) ) { 2670 if (!(status & MII_GS_AUTOCMPLT)) {
2735 /* Wait for 8 sec to give the process 2671 /* Wait for 8 sec to give the process
2736 * more time. Perhaps we should fail after a while. 2672 * more time. Perhaps we should fail after a while.
2737 */ 2673 */
2738 if (!priv->neg_be_verbose++) { 2674 if (!priv->neg_be_verbose++) {
2739 pr_info("TLAN: Giving autonegotiation more time.\n"); 2675 pr_info("Giving autonegotiation more time.\n");
2740 pr_info("TLAN: Please check that your adapter has\n"); 2676 pr_info("Please check that your adapter has\n");
2741 pr_info("TLAN: been properly connected to a HUB or Switch.\n"); 2677 pr_info("been properly connected to a HUB or Switch.\n");
2742 pr_info("TLAN: Trying to establish link in the background...\n"); 2678 pr_info("Trying to establish link in the background...\n");
2743 } 2679 }
2744 TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2680 tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
2745 return; 2681 return;
2746 } 2682 }
2747 2683
2748 printk( "TLAN: %s: Autonegotiation complete.\n", dev->name ); 2684 netdev_info(dev, "Autonegotiation complete\n");
2749 TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv ); 2685 tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
2750 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa ); 2686 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
2751 mode = an_adv & an_lpa & 0x03E0; 2687 mode = an_adv & an_lpa & 0x03E0;
2752 if ( mode & 0x0100 ) { 2688 if (mode & 0x0100)
2753 priv->tlanFullDuplex = true; 2689 priv->tlan_full_duplex = true;
2754 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) { 2690 else if (!(mode & 0x0080) && (mode & 0x0040))
2755 priv->tlanFullDuplex = true; 2691 priv->tlan_full_duplex = true;
2756 } 2692
2757 2693 if ((!(mode & 0x0180)) &&
2758 if ( ( ! ( mode & 0x0180 ) ) && 2694 (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
2759 ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && 2695 (priv->phy_num != 0)) {
2760 ( priv->phyNum != 0 ) ) { 2696 priv->phy_num = 0;
2761 priv->phyNum = 0; 2697 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
2762 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2698 | TLAN_NET_CFG_PHY_EN;
2763 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); 2699 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
2764 TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN ); 2700 tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
2765 return; 2701 return;
2766 } 2702 }
2767 2703
2768 if ( priv->phyNum == 0 ) { 2704 if (priv->phy_num == 0) {
2769 if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || 2705 if ((priv->duplex == TLAN_DUPLEX_FULL) ||
2770 ( an_adv & an_lpa & 0x0040 ) ) { 2706 (an_adv & an_lpa & 0x0040)) {
2771 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 2707 tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
2772 MII_GC_AUTOENB | MII_GC_DUPLEX ); 2708 MII_GC_AUTOENB | MII_GC_DUPLEX);
2773 pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" ); 2709 netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
2774 } else { 2710 } else {
2775 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB ); 2711 tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
2776 pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); 2712 MII_GC_AUTOENB);
2713 netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
2777 } 2714 }
2778 } 2715 }
2779 2716
2780 /* Wait for 100 ms. No reason in partiticular. 2717 /* Wait for 100 ms. No reason in partiticular.
2781 */ 2718 */
2782 TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET ); 2719 tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
2783 2720
2784} /* TLan_PhyFinishAutoNeg */ 2721}
2785 2722
2786#ifdef MONITOR 2723#ifdef MONITOR
2787 2724
2788 /********************************************************************* 2725/*********************************************************************
2789 * 2726 *
2790 * TLan_phyMonitor 2727 * tlan_phy_monitor
2791 * 2728 *
2792 * Returns: 2729 * Returns:
2793 * None 2730 * None
2794 * 2731 *
2795 * Params: 2732 * Params:
2796 * dev The device structure of this device. 2733 * dev The device structure of this device.
2797 * 2734 *
2798 * 2735 *
2799 * This function monitors PHY condition by reading the status 2736 * This function monitors PHY condition by reading the status
2800 * register via the MII bus. This can be used to give info 2737 * register via the MII bus. This can be used to give info
2801 * about link changes (up/down), and possible switch to alternate 2738 * about link changes (up/down), and possible switch to alternate
2802 * media. 2739 * media.
2803 * 2740 *
2804 * ******************************************************************/ 2741 *******************************************************************/
2805 2742
2806void TLan_PhyMonitor( struct net_device *dev ) 2743void tlan_phy_monitor(struct net_device *dev)
2807{ 2744{
2808 TLanPrivateInfo *priv = netdev_priv(dev); 2745 struct tlan_priv *priv = netdev_priv(dev);
2809 u16 phy; 2746 u16 phy;
2810 u16 phy_status; 2747 u16 phy_status;
2811 2748
2812 phy = priv->phy[priv->phyNum]; 2749 phy = priv->phy[priv->phy_num];
2813 2750
2814 /* Get PHY status register */ 2751 /* Get PHY status register */
2815 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status ); 2752 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
2816 2753
2817 /* Check if link has been lost */ 2754 /* Check if link has been lost */
2818 if (!(phy_status & MII_GS_LINK)) { 2755 if (!(phy_status & MII_GS_LINK)) {
2819 if (priv->link) { 2756 if (priv->link) {
2820 priv->link = 0; 2757 priv->link = 0;
2821 printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name); 2758 printk(KERN_DEBUG "TLAN: %s has lost link\n",
2822 netif_carrier_off(dev); 2759 dev->name);
2823 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); 2760 netif_carrier_off(dev);
2824 return; 2761 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
2762 return;
2825 } 2763 }
2826 } 2764 }
2827 2765
2828 /* Link restablished? */ 2766 /* Link restablished? */
2829 if ((phy_status & MII_GS_LINK) && !priv->link) { 2767 if ((phy_status & MII_GS_LINK) && !priv->link) {
2830 priv->link = 1; 2768 priv->link = 1;
2831 printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name); 2769 printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
2770 dev->name);
2832 netif_carrier_on(dev); 2771 netif_carrier_on(dev);
2833 } 2772 }
2834 2773
2835 /* Setup a new monitor */ 2774 /* Setup a new monitor */
2836 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); 2775 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
2837} 2776}
2838 2777
2839#endif /* MONITOR */ 2778#endif /* MONITOR */
@@ -2842,47 +2781,48 @@ void TLan_PhyMonitor( struct net_device *dev )
2842/***************************************************************************** 2781/*****************************************************************************
2843****************************************************************************** 2782******************************************************************************
2844 2783
2845 ThunderLAN Driver MII Routines 2784ThunderLAN driver MII routines
2846 2785
2847 These routines are based on the information in Chap. 2 of the 2786these routines are based on the information in chap. 2 of the
2848 "ThunderLAN Programmer's Guide", pp. 15-24. 2787"ThunderLAN Programmer's Guide", pp. 15-24.
2849 2788
2850****************************************************************************** 2789******************************************************************************
2851*****************************************************************************/ 2790*****************************************************************************/
2852 2791
2853 2792
2854 /*************************************************************** 2793/***************************************************************
2855 * TLan_MiiReadReg 2794 * tlan_mii_read_reg
2856 * 2795 *
2857 * Returns: 2796 * Returns:
2858 * false if ack received ok 2797 * false if ack received ok
2859 * true if no ack received or other error 2798 * true if no ack received or other error
2860 * 2799 *
2861 * Parms: 2800 * Parms:
2862 * dev The device structure containing 2801 * dev The device structure containing
2863 * The io address and interrupt count 2802 * The io address and interrupt count
2864 * for this device. 2803 * for this device.
2865 * phy The address of the PHY to be queried. 2804 * phy The address of the PHY to be queried.
2866 * reg The register whose contents are to be 2805 * reg The register whose contents are to be
2867 * retrieved. 2806 * retrieved.
2868 * val A pointer to a variable to store the 2807 * val A pointer to a variable to store the
2869 * retrieved value. 2808 * retrieved value.
2870 * 2809 *
2871 * This function uses the TLAN's MII bus to retrieve the contents 2810 * This function uses the TLAN's MII bus to retrieve the contents
2872 * of a given register on a PHY. It sends the appropriate info 2811 * of a given register on a PHY. It sends the appropriate info
2873 * and then reads the 16-bit register value from the MII bus via 2812 * and then reads the 16-bit register value from the MII bus via
2874 * the TLAN SIO register. 2813 * the TLAN SIO register.
2875 * 2814 *
2876 **************************************************************/ 2815 **************************************************************/
2877 2816
2878static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) 2817static bool
2818tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
2879{ 2819{
2880 u8 nack; 2820 u8 nack;
2881 u16 sio, tmp; 2821 u16 sio, tmp;
2882 u32 i; 2822 u32 i;
2883 bool err; 2823 bool err;
2884 int minten; 2824 int minten;
2885 TLanPrivateInfo *priv = netdev_priv(dev); 2825 struct tlan_priv *priv = netdev_priv(dev);
2886 unsigned long flags = 0; 2826 unsigned long flags = 0;
2887 2827
2888 err = false; 2828 err = false;
@@ -2892,48 +2832,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
2892 if (!in_irq()) 2832 if (!in_irq())
2893 spin_lock_irqsave(&priv->lock, flags); 2833 spin_lock_irqsave(&priv->lock, flags);
2894 2834
2895 TLan_MiiSync(dev->base_addr); 2835 tlan_mii_sync(dev->base_addr);
2896 2836
2897 minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); 2837 minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
2898 if ( minten ) 2838 if (minten)
2899 TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio); 2839 tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
2900 2840
2901 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ 2841 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
2902 TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */ 2842 tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
2903 TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ 2843 tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
2904 TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ 2844 tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
2905 2845
2906 2846
2907 TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */ 2847 tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
2908 2848
2909 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */ 2849 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
2910 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2850 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2911 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */ 2851 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
2912 2852
2913 nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */ 2853 nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
2914 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */ 2854 tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
2915 if (nack) { /* No ACK, so fake it */ 2855 if (nack) { /* no ACK, so fake it */
2916 for (i = 0; i < 16; i++) { 2856 for (i = 0; i < 16; i++) {
2917 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 2857 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2918 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2858 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2919 } 2859 }
2920 tmp = 0xffff; 2860 tmp = 0xffff;
2921 err = true; 2861 err = true;
2922 } else { /* ACK, so read data */ 2862 } else { /* ACK, so read data */
2923 for (tmp = 0, i = 0x8000; i; i >>= 1) { 2863 for (tmp = 0, i = 0x8000; i; i >>= 1) {
2924 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 2864 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2925 if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio)) 2865 if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
2926 tmp |= i; 2866 tmp |= i;
2927 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2867 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2928 } 2868 }
2929 } 2869 }
2930 2870
2931 2871
2932 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */ 2872 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
2933 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2873 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2934 2874
2935 if ( minten ) 2875 if (minten)
2936 TLan_SetBit(TLAN_NET_SIO_MINTEN, sio); 2876 tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
2937 2877
2938 *val = tmp; 2878 *val = tmp;
2939 2879
@@ -2942,116 +2882,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
2942 2882
2943 return err; 2883 return err;
2944 2884
2945} /* TLan_MiiReadReg */ 2885}
2946 2886
2947 2887
2948 2888
2949 2889
2950 /*************************************************************** 2890/***************************************************************
2951 * TLan_MiiSendData 2891 * tlan_mii_send_data
2952 * 2892 *
2953 * Returns: 2893 * Returns:
2954 * Nothing 2894 * Nothing
2955 * Parms: 2895 * Parms:
2956 * base_port The base IO port of the adapter in 2896 * base_port The base IO port of the adapter in
2957 * question. 2897 * question.
2958 * dev The address of the PHY to be queried. 2898 * dev The address of the PHY to be queried.
2959 * data The value to be placed on the MII bus. 2899 * data The value to be placed on the MII bus.
2960 * num_bits The number of bits in data that are to 2900 * num_bits The number of bits in data that are to
2961 * be placed on the MII bus. 2901 * be placed on the MII bus.
2962 * 2902 *
2963 * This function sends on sequence of bits on the MII 2903 * This function sends on sequence of bits on the MII
2964 * configuration bus. 2904 * configuration bus.
2965 * 2905 *
2966 **************************************************************/ 2906 **************************************************************/
2967 2907
2968static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits ) 2908static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
2969{ 2909{
2970 u16 sio; 2910 u16 sio;
2971 u32 i; 2911 u32 i;
2972 2912
2973 if ( num_bits == 0 ) 2913 if (num_bits == 0)
2974 return; 2914 return;
2975 2915
2976 outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); 2916 outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
2977 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; 2917 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
2978 TLan_SetBit( TLAN_NET_SIO_MTXEN, sio ); 2918 tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
2979 2919
2980 for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) { 2920 for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
2981 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); 2921 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2982 (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); 2922 (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
2983 if ( data & i ) 2923 if (data & i)
2984 TLan_SetBit( TLAN_NET_SIO_MDATA, sio ); 2924 tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
2985 else 2925 else
2986 TLan_ClearBit( TLAN_NET_SIO_MDATA, sio ); 2926 tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
2987 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 2927 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2988 (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); 2928 (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
2989 } 2929 }
2990 2930
2991} /* TLan_MiiSendData */ 2931}
2992 2932
2993 2933
2994 2934
2995 2935
2996 /*************************************************************** 2936/***************************************************************
2997 * TLan_MiiSync 2937 * TLan_MiiSync
2998 * 2938 *
2999 * Returns: 2939 * Returns:
3000 * Nothing 2940 * Nothing
3001 * Parms: 2941 * Parms:
3002 * base_port The base IO port of the adapter in 2942 * base_port The base IO port of the adapter in
3003 * question. 2943 * question.
3004 * 2944 *
3005 * This functions syncs all PHYs in terms of the MII configuration 2945 * This functions syncs all PHYs in terms of the MII configuration
3006 * bus. 2946 * bus.
3007 * 2947 *
3008 **************************************************************/ 2948 **************************************************************/
3009 2949
3010static void TLan_MiiSync( u16 base_port ) 2950static void tlan_mii_sync(u16 base_port)
3011{ 2951{
3012 int i; 2952 int i;
3013 u16 sio; 2953 u16 sio;
3014 2954
3015 outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); 2955 outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
3016 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; 2956 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
3017 2957
3018 TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio ); 2958 tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
3019 for ( i = 0; i < 32; i++ ) { 2959 for (i = 0; i < 32; i++) {
3020 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); 2960 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
3021 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 2961 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
3022 } 2962 }
3023 2963
3024} /* TLan_MiiSync */ 2964}
3025 2965
3026 2966
3027 2967
3028 2968
3029 /*************************************************************** 2969/***************************************************************
3030 * TLan_MiiWriteReg 2970 * tlan_mii_write_reg
3031 * 2971 *
3032 * Returns: 2972 * Returns:
3033 * Nothing 2973 * Nothing
3034 * Parms: 2974 * Parms:
3035 * dev The device structure for the device 2975 * dev The device structure for the device
3036 * to write to. 2976 * to write to.
3037 * phy The address of the PHY to be written to. 2977 * phy The address of the PHY to be written to.
3038 * reg The register whose contents are to be 2978 * reg The register whose contents are to be
3039 * written. 2979 * written.
3040 * val The value to be written to the register. 2980 * val The value to be written to the register.
3041 * 2981 *
3042 * This function uses the TLAN's MII bus to write the contents of a 2982 * This function uses the TLAN's MII bus to write the contents of a
3043 * given register on a PHY. It sends the appropriate info and then 2983 * given register on a PHY. It sends the appropriate info and then
3044 * writes the 16-bit register value from the MII configuration bus 2984 * writes the 16-bit register value from the MII configuration bus
3045 * via the TLAN SIO register. 2985 * via the TLAN SIO register.
3046 * 2986 *
3047 **************************************************************/ 2987 **************************************************************/
3048 2988
3049static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val ) 2989static void
2990tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
3050{ 2991{
3051 u16 sio; 2992 u16 sio;
3052 int minten; 2993 int minten;
3053 unsigned long flags = 0; 2994 unsigned long flags = 0;
3054 TLanPrivateInfo *priv = netdev_priv(dev); 2995 struct tlan_priv *priv = netdev_priv(dev);
3055 2996
3056 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); 2997 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
3057 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 2998 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3000,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
3059 if (!in_irq()) 3000 if (!in_irq())
3060 spin_lock_irqsave(&priv->lock, flags); 3001 spin_lock_irqsave(&priv->lock, flags);
3061 3002
3062 TLan_MiiSync( dev->base_addr ); 3003 tlan_mii_sync(dev->base_addr);
3063 3004
3064 minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); 3005 minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
3065 if ( minten ) 3006 if (minten)
3066 TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio ); 3007 tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
3067 3008
3068 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ 3009 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
3069 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */ 3010 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
3070 TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ 3011 tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
3071 TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ 3012 tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
3072 3013
3073 TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */ 3014 tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
3074 TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */ 3015 tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
3075 3016
3076 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */ 3017 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
3077 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 3018 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
3078 3019
3079 if ( minten ) 3020 if (minten)
3080 TLan_SetBit( TLAN_NET_SIO_MINTEN, sio ); 3021 tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
3081 3022
3082 if (!in_irq()) 3023 if (!in_irq())
3083 spin_unlock_irqrestore(&priv->lock, flags); 3024 spin_unlock_irqrestore(&priv->lock, flags);
3084 3025
3085} /* TLan_MiiWriteReg */ 3026}
3086 3027
3087 3028
3088 3029
@@ -3090,229 +3031,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
3090/***************************************************************************** 3031/*****************************************************************************
3091****************************************************************************** 3032******************************************************************************
3092 3033
3093 ThunderLAN Driver Eeprom routines 3034ThunderLAN driver eeprom routines
3094 3035
3095 The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A 3036the Compaq netelligent 10 and 10/100 cards use a microchip 24C02A
3096 EEPROM. These functions are based on information in Microchip's 3037EEPROM. these functions are based on information in microchip's
3097 data sheet. I don't know how well this functions will work with 3038data sheet. I don't know how well this functions will work with
3098 other EEPROMs. 3039other Eeproms.
3099 3040
3100****************************************************************************** 3041******************************************************************************
3101*****************************************************************************/ 3042*****************************************************************************/
3102 3043
3103 3044
3104 /*************************************************************** 3045/***************************************************************
3105 * TLan_EeSendStart 3046 * tlan_ee_send_start
3106 * 3047 *
3107 * Returns: 3048 * Returns:
3108 * Nothing 3049 * Nothing
3109 * Parms: 3050 * Parms:
3110 * io_base The IO port base address for the 3051 * io_base The IO port base address for the
3111 * TLAN device with the EEPROM to 3052 * TLAN device with the EEPROM to
3112 * use. 3053 * use.
3113 * 3054 *
3114 * This function sends a start cycle to an EEPROM attached 3055 * This function sends a start cycle to an EEPROM attached
3115 * to a TLAN chip. 3056 * to a TLAN chip.
3116 * 3057 *
3117 **************************************************************/ 3058 **************************************************************/
3118 3059
3119static void TLan_EeSendStart( u16 io_base ) 3060static void tlan_ee_send_start(u16 io_base)
3120{ 3061{
3121 u16 sio; 3062 u16 sio;
3122 3063
3123 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3064 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3124 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3065 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3125 3066
3126 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3067 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3127 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3068 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3128 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3069 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3129 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3070 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3130 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3071 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3131 3072
3132} /* TLan_EeSendStart */ 3073}
3133 3074
3134 3075
3135 3076
3136 3077
3137 /*************************************************************** 3078/***************************************************************
3138 * TLan_EeSendByte 3079 * tlan_ee_send_byte
3139 * 3080 *
3140 * Returns: 3081 * Returns:
3141 * If the correct ack was received, 0, otherwise 1 3082 * If the correct ack was received, 0, otherwise 1
3142 * Parms: io_base The IO port base address for the 3083 * Parms: io_base The IO port base address for the
3143 * TLAN device with the EEPROM to 3084 * TLAN device with the EEPROM to
3144 * use. 3085 * use.
3145 * data The 8 bits of information to 3086 * data The 8 bits of information to
3146 * send to the EEPROM. 3087 * send to the EEPROM.
3147 * stop If TLAN_EEPROM_STOP is passed, a 3088 * stop If TLAN_EEPROM_STOP is passed, a
3148 * stop cycle is sent after the 3089 * stop cycle is sent after the
3149 * byte is sent after the ack is 3090 * byte is sent after the ack is
3150 * read. 3091 * read.
3151 * 3092 *
3152 * This function sends a byte on the serial EEPROM line, 3093 * This function sends a byte on the serial EEPROM line,
3153 * driving the clock to send each bit. The function then 3094 * driving the clock to send each bit. The function then
3154 * reverses transmission direction and reads an acknowledge 3095 * reverses transmission direction and reads an acknowledge
3155 * bit. 3096 * bit.
3156 * 3097 *
3157 **************************************************************/ 3098 **************************************************************/
3158 3099
3159static int TLan_EeSendByte( u16 io_base, u8 data, int stop ) 3100static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
3160{ 3101{
3161 int err; 3102 int err;
3162 u8 place; 3103 u8 place;
3163 u16 sio; 3104 u16 sio;
3164 3105
3165 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3106 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3166 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3107 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3167 3108
3168 /* Assume clock is low, tx is enabled; */ 3109 /* Assume clock is low, tx is enabled; */
3169 for ( place = 0x80; place != 0; place >>= 1 ) { 3110 for (place = 0x80; place != 0; place >>= 1) {
3170 if ( place & data ) 3111 if (place & data)
3171 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3112 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3172 else 3113 else
3173 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3114 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3174 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3115 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3175 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3116 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3176 } 3117 }
3177 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); 3118 tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
3178 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3119 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3179 err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio ); 3120 err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
3180 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3121 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3181 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3122 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3182 3123
3183 if ( ( ! err ) && stop ) { 3124 if ((!err) && stop) {
3184 /* STOP, raise data while clock is high */ 3125 /* STOP, raise data while clock is high */
3185 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3126 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3186 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3127 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3187 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3128 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3188 } 3129 }
3189 3130
3190 return err; 3131 return err;
3191 3132
3192} /* TLan_EeSendByte */ 3133}
3193 3134
3194 3135
3195 3136
3196 3137
3197 /*************************************************************** 3138/***************************************************************
3198 * TLan_EeReceiveByte 3139 * tlan_ee_receive_byte
3199 * 3140 *
3200 * Returns: 3141 * Returns:
3201 * Nothing 3142 * Nothing
3202 * Parms: 3143 * Parms:
3203 * io_base The IO port base address for the 3144 * io_base The IO port base address for the
3204 * TLAN device with the EEPROM to 3145 * TLAN device with the EEPROM to
3205 * use. 3146 * use.
3206 * data An address to a char to hold the 3147 * data An address to a char to hold the
3207 * data sent from the EEPROM. 3148 * data sent from the EEPROM.
3208 * stop If TLAN_EEPROM_STOP is passed, a 3149 * stop If TLAN_EEPROM_STOP is passed, a
3209 * stop cycle is sent after the 3150 * stop cycle is sent after the
3210 * byte is received, and no ack is 3151 * byte is received, and no ack is
3211 * sent. 3152 * sent.
3212 * 3153 *
3213 * This function receives 8 bits of data from the EEPROM 3154 * This function receives 8 bits of data from the EEPROM
3214 * over the serial link. It then sends and ack bit, or no 3155 * over the serial link. It then sends and ack bit, or no
3215 * ack and a stop bit. This function is used to retrieve 3156 * ack and a stop bit. This function is used to retrieve
3216 * data after the address of a byte in the EEPROM has been 3157 * data after the address of a byte in the EEPROM has been
3217 * sent. 3158 * sent.
3218 * 3159 *
3219 **************************************************************/ 3160 **************************************************************/
3220 3161
3221static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) 3162static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
3222{ 3163{
3223 u8 place; 3164 u8 place;
3224 u16 sio; 3165 u16 sio;
3225 3166
3226 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3167 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3227 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3168 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3228 *data = 0; 3169 *data = 0;
3229 3170
3230 /* Assume clock is low, tx is enabled; */ 3171 /* Assume clock is low, tx is enabled; */
3231 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); 3172 tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
3232 for ( place = 0x80; place; place >>= 1 ) { 3173 for (place = 0x80; place; place >>= 1) {
3233 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3174 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3234 if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) ) 3175 if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
3235 *data |= place; 3176 *data |= place;
3236 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3177 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3237 } 3178 }
3238 3179
3239 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3180 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3240 if ( ! stop ) { 3181 if (!stop) {
3241 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */ 3182 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
3242 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3183 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3243 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3184 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3244 } else { 3185 } else {
3245 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ 3186 tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
3246 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3187 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3247 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3188 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3248 /* STOP, raise data while clock is high */ 3189 /* STOP, raise data while clock is high */
3249 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3190 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3250 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3191 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3251 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3192 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3252 } 3193 }
3253 3194
3254} /* TLan_EeReceiveByte */ 3195}
3255 3196
3256 3197
3257 3198
3258 3199
3259 /*************************************************************** 3200/***************************************************************
3260 * TLan_EeReadByte 3201 * tlan_ee_read_byte
3261 * 3202 *
3262 * Returns: 3203 * Returns:
3263 * No error = 0, else, the stage at which the error 3204 * No error = 0, else, the stage at which the error
3264 * occurred. 3205 * occurred.
3265 * Parms: 3206 * Parms:
3266 * io_base The IO port base address for the 3207 * io_base The IO port base address for the
3267 * TLAN device with the EEPROM to 3208 * TLAN device with the EEPROM to
3268 * use. 3209 * use.
3269 * ee_addr The address of the byte in the 3210 * ee_addr The address of the byte in the
3270 * EEPROM whose contents are to be 3211 * EEPROM whose contents are to be
3271 * retrieved. 3212 * retrieved.
3272 * data An address to a char to hold the 3213 * data An address to a char to hold the
3273 * data obtained from the EEPROM. 3214 * data obtained from the EEPROM.
3274 * 3215 *
3275 * This function reads a byte of information from an byte 3216 * This function reads a byte of information from an byte
3276 * cell in the EEPROM. 3217 * cell in the EEPROM.
3277 * 3218 *
3278 **************************************************************/ 3219 **************************************************************/
3279 3220
3280static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) 3221static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
3281{ 3222{
3282 int err; 3223 int err;
3283 TLanPrivateInfo *priv = netdev_priv(dev); 3224 struct tlan_priv *priv = netdev_priv(dev);
3284 unsigned long flags = 0; 3225 unsigned long flags = 0;
3285 int ret=0; 3226 int ret = 0;
3286 3227
3287 spin_lock_irqsave(&priv->lock, flags); 3228 spin_lock_irqsave(&priv->lock, flags);
3288 3229
3289 TLan_EeSendStart( dev->base_addr ); 3230 tlan_ee_send_start(dev->base_addr);
3290 err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK ); 3231 err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
3291 if (err) 3232 if (err) {
3292 { 3233 ret = 1;
3293 ret=1;
3294 goto fail; 3234 goto fail;
3295 } 3235 }
3296 err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK ); 3236 err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
3297 if (err) 3237 if (err) {
3298 { 3238 ret = 2;
3299 ret=2;
3300 goto fail; 3239 goto fail;
3301 } 3240 }
3302 TLan_EeSendStart( dev->base_addr ); 3241 tlan_ee_send_start(dev->base_addr);
3303 err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK ); 3242 err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
3304 if (err) 3243 if (err) {
3305 { 3244 ret = 3;
3306 ret=3;
3307 goto fail; 3245 goto fail;
3308 } 3246 }
3309 TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP ); 3247 tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
3310fail: 3248fail:
3311 spin_unlock_irqrestore(&priv->lock, flags); 3249 spin_unlock_irqrestore(&priv->lock, flags);
3312 3250
3313 return ret; 3251 return ret;
3314 3252
3315} /* TLan_EeReadByte */ 3253}
3316 3254
3317 3255
3318 3256
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced774e2..5fc98a8e4889 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
20 ********************************************************************/ 20 ********************************************************************/
21 21
22 22
23#include <asm/io.h> 23#include <linux/io.h>
24#include <asm/types.h> 24#include <linux/types.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26 26
27 27
@@ -40,8 +40,11 @@
40#define TLAN_IGNORE 0 40#define TLAN_IGNORE 0
41#define TLAN_RECORD 1 41#define TLAN_RECORD 1
42 42
43#define TLAN_DBG(lvl, format, args...) \ 43#define TLAN_DBG(lvl, format, args...) \
44 do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0) 44 do { \
45 if (debug&lvl) \
46 printk(KERN_DEBUG "TLAN: " format, ##args); \
47 } while (0)
45 48
46#define TLAN_DEBUG_GNRL 0x0001 49#define TLAN_DEBUG_GNRL 0x0001
47#define TLAN_DEBUG_TX 0x0002 50#define TLAN_DEBUG_TX 0x0002
@@ -50,7 +53,8 @@
50#define TLAN_DEBUG_PROBE 0x0010 53#define TLAN_DEBUG_PROBE 0x0010
51 54
52#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */ 55#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
53#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */ 56#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
57 at a time */
54 58
55 59
56 /***************************************************************** 60 /*****************************************************************
@@ -70,13 +74,13 @@
70#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 74#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
71#endif 75#endif
72 76
73typedef struct tlan_adapter_entry { 77struct tlan_adapter_entry {
74 u16 vendorId; 78 u16 vendor_id;
75 u16 deviceId; 79 u16 device_id;
76 char *deviceLabel; 80 char *device_label;
77 u32 flags; 81 u32 flags;
78 u16 addrOfs; 82 u16 addr_ofs;
79} TLanAdapterEntry; 83};
80 84
81#define TLAN_ADAPTER_NONE 0x00000000 85#define TLAN_ADAPTER_NONE 0x00000000
82#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001 86#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
129#define TLAN_CSTAT_DP_PR 0x0100 133#define TLAN_CSTAT_DP_PR 0x0100
130 134
131 135
132typedef struct tlan_buffer_ref_tag { 136struct tlan_buffer {
133 u32 count; 137 u32 count;
134 u32 address; 138 u32 address;
135} TLanBufferRef; 139};
136 140
137 141
138typedef struct tlan_list_tag { 142struct tlan_list {
139 u32 forward; 143 u32 forward;
140 u16 cStat; 144 u16 c_stat;
141 u16 frameSize; 145 u16 frame_size;
142 TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST]; 146 struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
143} TLanList; 147};
144 148
145 149
146typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; 150typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
164 * 168 *
165 ****************************************************************/ 169 ****************************************************************/
166 170
167typedef struct tlan_private_tag { 171struct tlan_priv {
168 struct net_device *nextDevice; 172 struct net_device *next_device;
169 struct pci_dev *pciDev; 173 struct pci_dev *pci_dev;
170 struct net_device *dev; 174 struct net_device *dev;
171 void *dmaStorage; 175 void *dma_storage;
172 dma_addr_t dmaStorageDMA; 176 dma_addr_t dma_storage_dma;
173 unsigned int dmaSize; 177 unsigned int dma_size;
174 u8 *padBuffer; 178 u8 *pad_buffer;
175 TLanList *rxList; 179 struct tlan_list *rx_list;
176 dma_addr_t rxListDMA; 180 dma_addr_t rx_list_dma;
177 u8 *rxBuffer; 181 u8 *rx_buffer;
178 dma_addr_t rxBufferDMA; 182 dma_addr_t rx_buffer_dma;
179 u32 rxHead; 183 u32 rx_head;
180 u32 rxTail; 184 u32 rx_tail;
181 u32 rxEocCount; 185 u32 rx_eoc_count;
182 TLanList *txList; 186 struct tlan_list *tx_list;
183 dma_addr_t txListDMA; 187 dma_addr_t tx_list_dma;
184 u8 *txBuffer; 188 u8 *tx_buffer;
185 dma_addr_t txBufferDMA; 189 dma_addr_t tx_buffer_dma;
186 u32 txHead; 190 u32 tx_head;
187 u32 txInProgress; 191 u32 tx_in_progress;
188 u32 txTail; 192 u32 tx_tail;
189 u32 txBusyCount; 193 u32 tx_busy_count;
190 u32 phyOnline; 194 u32 phy_online;
191 u32 timerSetAt; 195 u32 timer_set_at;
192 u32 timerType; 196 u32 timer_type;
193 struct timer_list timer; 197 struct timer_list timer;
194 struct board *adapter; 198 struct board *adapter;
195 u32 adapterRev; 199 u32 adapter_rev;
196 u32 aui; 200 u32 aui;
197 u32 debug; 201 u32 debug;
198 u32 duplex; 202 u32 duplex;
199 u32 phy[2]; 203 u32 phy[2];
200 u32 phyNum; 204 u32 phy_num;
201 u32 speed; 205 u32 speed;
202 u8 tlanRev; 206 u8 tlan_rev;
203 u8 tlanFullDuplex; 207 u8 tlan_full_duplex;
204 spinlock_t lock; 208 spinlock_t lock;
205 u8 link; 209 u8 link;
206 u8 is_eisa; 210 u8 is_eisa;
207 struct work_struct tlan_tqueue; 211 struct work_struct tlan_tqueue;
208 u8 neg_be_verbose; 212 u8 neg_be_verbose;
209} TLanPrivateInfo; 213};
210 214
211 215
212 216
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
247 ****************************************************************/ 251 ****************************************************************/
248 252
249#define TLAN_HOST_CMD 0x00 253#define TLAN_HOST_CMD 0x00
250#define TLAN_HC_GO 0x80000000 254#define TLAN_HC_GO 0x80000000
251#define TLAN_HC_STOP 0x40000000 255#define TLAN_HC_STOP 0x40000000
252#define TLAN_HC_ACK 0x20000000 256#define TLAN_HC_ACK 0x20000000
253#define TLAN_HC_CS_MASK 0x1FE00000 257#define TLAN_HC_CS_MASK 0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
283#define TLAN_NET_CMD_TRFRAM 0x02 287#define TLAN_NET_CMD_TRFRAM 0x02
284#define TLAN_NET_CMD_TXPACE 0x01 288#define TLAN_NET_CMD_TXPACE 0x01
285#define TLAN_NET_SIO 0x01 289#define TLAN_NET_SIO 0x01
286#define TLAN_NET_SIO_MINTEN 0x80 290#define TLAN_NET_SIO_MINTEN 0x80
287#define TLAN_NET_SIO_ECLOK 0x40 291#define TLAN_NET_SIO_ECLOK 0x40
288#define TLAN_NET_SIO_ETXEN 0x20 292#define TLAN_NET_SIO_ETXEN 0x20
289#define TLAN_NET_SIO_EDATA 0x10 293#define TLAN_NET_SIO_EDATA 0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
304#define TLAN_NET_MASK_MASK4 0x10 308#define TLAN_NET_MASK_MASK4 0x10
305#define TLAN_NET_MASK_RSRVD 0x0F 309#define TLAN_NET_MASK_RSRVD 0x0F
306#define TLAN_NET_CONFIG 0x04 310#define TLAN_NET_CONFIG 0x04
307#define TLAN_NET_CFG_RCLK 0x8000 311#define TLAN_NET_CFG_RCLK 0x8000
308#define TLAN_NET_CFG_TCLK 0x4000 312#define TLAN_NET_CFG_TCLK 0x4000
309#define TLAN_NET_CFG_BIT 0x2000 313#define TLAN_NET_CFG_BIT 0x2000
310#define TLAN_NET_CFG_RXCRC 0x1000 314#define TLAN_NET_CFG_RXCRC 0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
372/* Generic MII/PHY Registers */ 376/* Generic MII/PHY Registers */
373 377
374#define MII_GEN_CTL 0x00 378#define MII_GEN_CTL 0x00
375#define MII_GC_RESET 0x8000 379#define MII_GC_RESET 0x8000
376#define MII_GC_LOOPBK 0x4000 380#define MII_GC_LOOPBK 0x4000
377#define MII_GC_SPEEDSEL 0x2000 381#define MII_GC_SPEEDSEL 0x2000
378#define MII_GC_AUTOENB 0x1000 382#define MII_GC_AUTOENB 0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
397#define MII_GS_EXTCAP 0x0001 401#define MII_GS_EXTCAP 0x0001
398#define MII_GEN_ID_HI 0x02 402#define MII_GEN_ID_HI 0x02
399#define MII_GEN_ID_LO 0x03 403#define MII_GEN_ID_LO 0x03
400#define MII_GIL_OUI 0xFC00 404#define MII_GIL_OUI 0xFC00
401#define MII_GIL_MODEL 0x03F0 405#define MII_GIL_MODEL 0x03F0
402#define MII_GIL_REVISION 0x000F 406#define MII_GIL_REVISION 0x000F
403#define MII_AN_ADV 0x04 407#define MII_AN_ADV 0x04
404#define MII_AN_LPA 0x05 408#define MII_AN_LPA 0x05
405#define MII_AN_EXP 0x06 409#define MII_AN_EXP 0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
408 412
409#define TLAN_TLPHY_ID 0x10 413#define TLAN_TLPHY_ID 0x10
410#define TLAN_TLPHY_CTL 0x11 414#define TLAN_TLPHY_CTL 0x11
411#define TLAN_TC_IGLINK 0x8000 415#define TLAN_TC_IGLINK 0x8000
412#define TLAN_TC_SWAPOL 0x4000 416#define TLAN_TC_SWAPOL 0x4000
413#define TLAN_TC_AUISEL 0x2000 417#define TLAN_TC_AUISEL 0x2000
414#define TLAN_TC_SQEEN 0x1000 418#define TLAN_TC_SQEEN 0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
435#define LEVEL1_ID1 0x7810 439#define LEVEL1_ID1 0x7810
436#define LEVEL1_ID2 0x0000 440#define LEVEL1_ID2 0x0000
437 441
438#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0 442#define CIRC_INC(a, b) if (++a >= b) a = 0
439 443
440/* Routines to access internal registers. */ 444/* Routines to access internal registers. */
441 445
442static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr) 446static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
443{ 447{
444 outw(internal_addr, base_addr + TLAN_DIO_ADR); 448 outw(internal_addr, base_addr + TLAN_DIO_ADR);
445 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)); 449 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
446 450
447} /* TLan_DioRead8 */ 451}
448 452
449 453
450 454
451 455
452static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr) 456static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
453{ 457{
454 outw(internal_addr, base_addr + TLAN_DIO_ADR); 458 outw(internal_addr, base_addr + TLAN_DIO_ADR);
455 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)); 459 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
456 460
457} /* TLan_DioRead16 */ 461}
458 462
459 463
460 464
461 465
462static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr) 466static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
463{ 467{
464 outw(internal_addr, base_addr + TLAN_DIO_ADR); 468 outw(internal_addr, base_addr + TLAN_DIO_ADR);
465 return inl(base_addr + TLAN_DIO_DATA); 469 return inl(base_addr + TLAN_DIO_DATA);
466 470
467} /* TLan_DioRead32 */ 471}
468 472
469 473
470 474
471 475
472static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data) 476static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
473{ 477{
474 outw(internal_addr, base_addr + TLAN_DIO_ADR); 478 outw(internal_addr, base_addr + TLAN_DIO_ADR);
475 outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3)); 479 outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
479 483
480 484
481 485
482static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data) 486static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
483{ 487{
484 outw(internal_addr, base_addr + TLAN_DIO_ADR); 488 outw(internal_addr, base_addr + TLAN_DIO_ADR);
485 outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); 489 outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
489 493
490 494
491 495
492static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data) 496static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
493{ 497{
494 outw(internal_addr, base_addr + TLAN_DIO_ADR); 498 outw(internal_addr, base_addr + TLAN_DIO_ADR);
495 outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); 499 outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
496 500
497} 501}
498 502
499#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port) 503#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
500#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit)) 504#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
501#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port) 505#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
502 506
503/* 507/*
504 * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those 508 * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
506 * 510 *
507 * The original code was: 511 * The original code was:
508 * 512 *
509 * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); } 513 * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); }
510 * 514 *
511 * #define XOR8( a, b, c, d, e, f, g, h ) \ 515 * #define XOR8(a, b, c, d, e, f, g, h) \
512 * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) ) 516 * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
513 * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) ) 517 * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
514 * 518 *
515 * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), 519 * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
516 * DA(a,30), DA(a,36), DA(a,42) ); 520 * DA(a,30), DA(a,36), DA(a,42));
517 * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), 521 * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
518 * DA(a,31), DA(a,37), DA(a,43) ) << 1; 522 * DA(a,31), DA(a,37), DA(a,43)) << 1;
519 * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), 523 * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
520 * DA(a,32), DA(a,38), DA(a,44) ) << 2; 524 * DA(a,32), DA(a,38), DA(a,44)) << 2;
521 * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), 525 * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
522 * DA(a,33), DA(a,39), DA(a,45) ) << 3; 526 * DA(a,33), DA(a,39), DA(a,45)) << 3;
523 * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), 527 * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
524 * DA(a,34), DA(a,40), DA(a,46) ) << 4; 528 * DA(a,34), DA(a,40), DA(a,46)) << 4;
525 * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), 529 * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
526 * DA(a,35), DA(a,41), DA(a,47) ) << 5; 530 * DA(a,35), DA(a,41), DA(a,47)) << 5;
527 * 531 *
528 */ 532 */
529static inline u32 TLan_HashFunc( const u8 *a ) 533static inline u32 tlan_hash_func(const u8 *a)
530{ 534{
531 u8 hash; 535 u8 hash;
532 536
533 hash = (a[0]^a[3]); /* & 077 */ 537 hash = (a[0]^a[3]); /* & 077 */
534 hash ^= ((a[0]^a[3])>>6); /* & 003 */ 538 hash ^= ((a[0]^a[3])>>6); /* & 003 */
535 hash ^= ((a[1]^a[4])<<2); /* & 074 */ 539 hash ^= ((a[1]^a[4])<<2); /* & 074 */
536 hash ^= ((a[1]^a[4])>>4); /* & 017 */ 540 hash ^= ((a[1]^a[4])>>4); /* & 017 */
537 hash ^= ((a[2]^a[5])<<4); /* & 060 */ 541 hash ^= ((a[2]^a[5])<<4); /* & 060 */
538 hash ^= ((a[2]^a[5])>>2); /* & 077 */ 542 hash ^= ((a[2]^a[5])>>2); /* & 077 */
539 543
540 return hash & 077; 544 return hash & 077;
541} 545}
542#endif 546#endif
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b100bd50a0d7..f5e9ac00a07b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -34,6 +34,8 @@
34 * Modifications for 2.3.99-pre5 kernel. 34 * Modifications for 2.3.99-pre5 kernel.
35 */ 35 */
36 36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
37#define DRV_NAME "tun" 39#define DRV_NAME "tun"
38#define DRV_VERSION "1.6" 40#define DRV_VERSION "1.6"
39#define DRV_DESCRIPTION "Universal TUN/TAP device driver" 41#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
@@ -76,11 +78,27 @@
76#ifdef TUN_DEBUG 78#ifdef TUN_DEBUG
77static int debug; 79static int debug;
78 80
79#define DBG if(tun->debug)printk 81#define tun_debug(level, tun, fmt, args...) \
80#define DBG1 if(debug==2)printk 82do { \
83 if (tun->debug) \
84 netdev_printk(level, tun->dev, fmt, ##args); \
85} while (0)
86#define DBG1(level, fmt, args...) \
87do { \
88 if (debug == 2) \
89 printk(level fmt, ##args); \
90} while (0)
81#else 91#else
82#define DBG( a... ) 92#define tun_debug(level, tun, fmt, args...) \
83#define DBG1( a... ) 93do { \
94 if (0) \
95 netdev_printk(level, tun->dev, fmt, ##args); \
96} while (0)
97#define DBG1(level, fmt, args...) \
98do { \
99 if (0) \
100 printk(level fmt, ##args); \
101} while (0)
84#endif 102#endif
85 103
86#define FLT_EXACT_COUNT 8 104#define FLT_EXACT_COUNT 8
@@ -205,7 +223,7 @@ static void tun_put(struct tun_struct *tun)
205 tun_detach(tfile->tun); 223 tun_detach(tfile->tun);
206} 224}
207 225
208/* TAP filterting */ 226/* TAP filtering */
209static void addr_hash_set(u32 *mask, const u8 *addr) 227static void addr_hash_set(u32 *mask, const u8 *addr)
210{ 228{
211 int n = ether_crc(ETH_ALEN, addr) >> 26; 229 int n = ether_crc(ETH_ALEN, addr) >> 26;
@@ -360,7 +378,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
360{ 378{
361 struct tun_struct *tun = netdev_priv(dev); 379 struct tun_struct *tun = netdev_priv(dev);
362 380
363 DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); 381 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
364 382
365 /* Drop packet if interface is not attached */ 383 /* Drop packet if interface is not attached */
366 if (!tun->tfile) 384 if (!tun->tfile)
@@ -499,7 +517,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
499 517
500 sk = tun->socket.sk; 518 sk = tun->socket.sk;
501 519
502 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 520 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
503 521
504 poll_wait(file, &tun->wq.wait, wait); 522 poll_wait(file, &tun->wq.wait, wait);
505 523
@@ -690,7 +708,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
690 if (!tun) 708 if (!tun)
691 return -EBADFD; 709 return -EBADFD;
692 710
693 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); 711 tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
694 712
695 result = tun_get_user(tun, iv, iov_length(iv, count), 713 result = tun_get_user(tun, iv, iov_length(iv, count),
696 file->f_flags & O_NONBLOCK); 714 file->f_flags & O_NONBLOCK);
@@ -739,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
739 else if (sinfo->gso_type & SKB_GSO_UDP) 757 else if (sinfo->gso_type & SKB_GSO_UDP)
740 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; 758 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
741 else { 759 else {
742 printk(KERN_ERR "tun: unexpected GSO type: " 760 pr_err("unexpected GSO type: "
743 "0x%x, gso_size %d, hdr_len %d\n", 761 "0x%x, gso_size %d, hdr_len %d\n",
744 sinfo->gso_type, gso.gso_size, 762 sinfo->gso_type, gso.gso_size,
745 gso.hdr_len); 763 gso.hdr_len);
@@ -786,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
786 struct sk_buff *skb; 804 struct sk_buff *skb;
787 ssize_t ret = 0; 805 ssize_t ret = 0;
788 806
789 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 807 tun_debug(KERN_INFO, tun, "tun_chr_read\n");
790 808
791 add_wait_queue(&tun->wq.wait, &wait); 809 add_wait_queue(&tun->wq.wait, &wait);
792 while (len) { 810 while (len) {
@@ -1083,7 +1101,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1083 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || 1101 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1084 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1102 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1085 device_create_file(&tun->dev->dev, &dev_attr_group)) 1103 device_create_file(&tun->dev->dev, &dev_attr_group))
1086 printk(KERN_ERR "Failed to create tun sysfs files\n"); 1104 pr_err("Failed to create tun sysfs files\n");
1087 1105
1088 sk->sk_destruct = tun_sock_destruct; 1106 sk->sk_destruct = tun_sock_destruct;
1089 1107
@@ -1092,7 +1110,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1092 goto failed; 1110 goto failed;
1093 } 1111 }
1094 1112
1095 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); 1113 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1096 1114
1097 if (ifr->ifr_flags & IFF_NO_PI) 1115 if (ifr->ifr_flags & IFF_NO_PI)
1098 tun->flags |= TUN_NO_PI; 1116 tun->flags |= TUN_NO_PI;
@@ -1129,7 +1147,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1129static int tun_get_iff(struct net *net, struct tun_struct *tun, 1147static int tun_get_iff(struct net *net, struct tun_struct *tun,
1130 struct ifreq *ifr) 1148 struct ifreq *ifr)
1131{ 1149{
1132 DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); 1150 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1133 1151
1134 strcpy(ifr->ifr_name, tun->dev->name); 1152 strcpy(ifr->ifr_name, tun->dev->name);
1135 1153
@@ -1142,7 +1160,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
1142 * privs required. */ 1160 * privs required. */
1143static int set_offload(struct net_device *dev, unsigned long arg) 1161static int set_offload(struct net_device *dev, unsigned long arg)
1144{ 1162{
1145 unsigned int old_features, features; 1163 u32 old_features, features;
1146 1164
1147 old_features = dev->features; 1165 old_features = dev->features;
1148 /* Unset features, set them as we chew on the arg. */ 1166 /* Unset features, set them as we chew on the arg. */
@@ -1229,7 +1247,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1229 if (!tun) 1247 if (!tun)
1230 goto unlock; 1248 goto unlock;
1231 1249
1232 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 1250 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
1233 1251
1234 ret = 0; 1252 ret = 0;
1235 switch (cmd) { 1253 switch (cmd) {
@@ -1249,8 +1267,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1249 else 1267 else
1250 tun->flags &= ~TUN_NOCHECKSUM; 1268 tun->flags &= ~TUN_NOCHECKSUM;
1251 1269
1252 DBG(KERN_INFO "%s: checksum %s\n", 1270 tun_debug(KERN_INFO, tun, "checksum %s\n",
1253 tun->dev->name, arg ? "disabled" : "enabled"); 1271 arg ? "disabled" : "enabled");
1254 break; 1272 break;
1255 1273
1256 case TUNSETPERSIST: 1274 case TUNSETPERSIST:
@@ -1260,33 +1278,34 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1260 else 1278 else
1261 tun->flags &= ~TUN_PERSIST; 1279 tun->flags &= ~TUN_PERSIST;
1262 1280
1263 DBG(KERN_INFO "%s: persist %s\n", 1281 tun_debug(KERN_INFO, tun, "persist %s\n",
1264 tun->dev->name, arg ? "enabled" : "disabled"); 1282 arg ? "enabled" : "disabled");
1265 break; 1283 break;
1266 1284
1267 case TUNSETOWNER: 1285 case TUNSETOWNER:
1268 /* Set owner of the device */ 1286 /* Set owner of the device */
1269 tun->owner = (uid_t) arg; 1287 tun->owner = (uid_t) arg;
1270 1288
1271 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); 1289 tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
1272 break; 1290 break;
1273 1291
1274 case TUNSETGROUP: 1292 case TUNSETGROUP:
1275 /* Set group of the device */ 1293 /* Set group of the device */
1276 tun->group= (gid_t) arg; 1294 tun->group= (gid_t) arg;
1277 1295
1278 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group); 1296 tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
1279 break; 1297 break;
1280 1298
1281 case TUNSETLINK: 1299 case TUNSETLINK:
1282 /* Only allow setting the type when the interface is down */ 1300 /* Only allow setting the type when the interface is down */
1283 if (tun->dev->flags & IFF_UP) { 1301 if (tun->dev->flags & IFF_UP) {
1284 DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", 1302 tun_debug(KERN_INFO, tun,
1285 tun->dev->name); 1303 "Linktype set failed because interface is up\n");
1286 ret = -EBUSY; 1304 ret = -EBUSY;
1287 } else { 1305 } else {
1288 tun->dev->type = (int) arg; 1306 tun->dev->type = (int) arg;
1289 DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); 1307 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1308 tun->dev->type);
1290 ret = 0; 1309 ret = 0;
1291 } 1310 }
1292 break; 1311 break;
@@ -1318,8 +1337,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1318 1337
1319 case SIOCSIFHWADDR: 1338 case SIOCSIFHWADDR:
1320 /* Set hw address */ 1339 /* Set hw address */
1321 DBG(KERN_DEBUG "%s: set hw address: %pM\n", 1340 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
1322 tun->dev->name, ifr.ifr_hwaddr.sa_data); 1341 ifr.ifr_hwaddr.sa_data);
1323 1342
1324 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 1343 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1325 break; 1344 break;
@@ -1433,7 +1452,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1433 if (!tun) 1452 if (!tun)
1434 return -EBADFD; 1453 return -EBADFD;
1435 1454
1436 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); 1455 tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
1437 1456
1438 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) 1457 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1439 goto out; 1458 goto out;
@@ -1455,7 +1474,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1455{ 1474{
1456 struct tun_file *tfile; 1475 struct tun_file *tfile;
1457 1476
1458 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 1477 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
1459 1478
1460 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); 1479 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1461 if (!tfile) 1480 if (!tfile)
@@ -1476,7 +1495,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
1476 if (tun) { 1495 if (tun) {
1477 struct net_device *dev = tun->dev; 1496 struct net_device *dev = tun->dev;
1478 1497
1479 DBG(KERN_INFO "%s: tun_chr_close\n", dev->name); 1498 tun_debug(KERN_INFO, tun, "tun_chr_close\n");
1480 1499
1481 __tun_detach(tun); 1500 __tun_detach(tun);
1482 1501
@@ -1607,18 +1626,18 @@ static int __init tun_init(void)
1607{ 1626{
1608 int ret = 0; 1627 int ret = 0;
1609 1628
1610 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 1629 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1611 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); 1630 pr_info("%s\n", DRV_COPYRIGHT);
1612 1631
1613 ret = rtnl_link_register(&tun_link_ops); 1632 ret = rtnl_link_register(&tun_link_ops);
1614 if (ret) { 1633 if (ret) {
1615 printk(KERN_ERR "tun: Can't register link_ops\n"); 1634 pr_err("Can't register link_ops\n");
1616 goto err_linkops; 1635 goto err_linkops;
1617 } 1636 }
1618 1637
1619 ret = misc_register(&tun_miscdev); 1638 ret = misc_register(&tun_miscdev);
1620 if (ret) { 1639 if (ret) {
1621 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); 1640 pr_err("Can't register misc device %d\n", TUN_MINOR);
1622 goto err_misc; 1641 goto err_misc;
1623 } 1642 }
1624 return 0; 1643 return 0;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index a3c46f6a15e7..7fa5ec2de942 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
123#include <linux/in6.h> 123#include <linux/in6.h>
124#include <linux/dma-mapping.h> 124#include <linux/dma-mapping.h>
125#include <linux/firmware.h> 125#include <linux/firmware.h>
126#include <generated/utsrelease.h>
127 126
128#include "typhoon.h" 127#include "typhoon.h"
129 128
130MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); 129MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
131MODULE_VERSION(UTS_RELEASE); 130MODULE_VERSION("1.0");
132MODULE_LICENSE("GPL"); 131MODULE_LICENSE("GPL");
133MODULE_FIRMWARE(FIRMWARE_NAME); 132MODULE_FIRMWARE(FIRMWARE_NAME);
134MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); 133MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 109751bad3bb..f967913e11bc 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -328,13 +328,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
328{ 328{
329 static const char ifname[] = "usbpn%d"; 329 static const char ifname[] = "usbpn%d";
330 const struct usb_cdc_union_desc *union_header = NULL; 330 const struct usb_cdc_union_desc *union_header = NULL;
331 const struct usb_cdc_header_desc *phonet_header = NULL;
332 const struct usb_host_interface *data_desc; 331 const struct usb_host_interface *data_desc;
333 struct usb_interface *data_intf; 332 struct usb_interface *data_intf;
334 struct usb_device *usbdev = interface_to_usbdev(intf); 333 struct usb_device *usbdev = interface_to_usbdev(intf);
335 struct net_device *dev; 334 struct net_device *dev;
336 struct usbpn_dev *pnd; 335 struct usbpn_dev *pnd;
337 u8 *data; 336 u8 *data;
337 int phonet = 0;
338 int len, err; 338 int len, err;
339 339
340 data = intf->altsetting->extra; 340 data = intf->altsetting->extra;
@@ -355,10 +355,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
355 (struct usb_cdc_union_desc *)data; 355 (struct usb_cdc_union_desc *)data;
356 break; 356 break;
357 case 0xAB: 357 case 0xAB:
358 if (phonet_header || dlen < 5) 358 phonet = 1;
359 break;
360 phonet_header =
361 (struct usb_cdc_header_desc *)data;
362 break; 359 break;
363 } 360 }
364 } 361 }
@@ -366,7 +363,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
366 len -= dlen; 363 len -= dlen;
367 } 364 }
368 365
369 if (!union_header || !phonet_header) 366 if (!union_header || !phonet)
370 return -EINVAL; 367 return -EINVAL;
371 368
372 data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0); 369 data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0);
@@ -392,7 +389,6 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
392 389
393 pnd = netdev_priv(dev); 390 pnd = netdev_priv(dev);
394 SET_NETDEV_DEV(dev, &intf->dev); 391 SET_NETDEV_DEV(dev, &intf->dev);
395 netif_stop_queue(dev);
396 392
397 pnd->dev = dev; 393 pnd->dev = dev;
398 pnd->usb = usb_get_dev(usbdev); 394 pnd->usb = usb_get_dev(usbdev);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cc83fa71c3ff..105d7f0630cc 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
403 if (tb[IFLA_ADDRESS] == NULL) 403 if (tb[IFLA_ADDRESS] == NULL)
404 random_ether_addr(dev->dev_addr); 404 random_ether_addr(dev->dev_addr);
405 405
406 if (tb[IFLA_IFNAME])
407 nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
408 else
409 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
410
411 if (strchr(dev->name, '%')) {
412 err = dev_alloc_name(dev, dev->name);
413 if (err < 0)
414 goto err_alloc_name;
415 }
416
417 err = register_netdevice(dev); 406 err = register_netdevice(dev);
418 if (err < 0) 407 if (err < 0)
419 goto err_register_dev; 408 goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
433 422
434err_register_dev: 423err_register_dev:
435 /* nothing to do */ 424 /* nothing to do */
436err_alloc_name:
437err_configure_peer: 425err_configure_peer:
438 unregister_netdevice(peer); 426 unregister_netdevice(peer);
439 return err; 427 return err;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 09cac704fdd7..0d6fec6b7d93 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2923static int velocity_set_wol(struct velocity_info *vptr) 2923static int velocity_set_wol(struct velocity_info *vptr)
2924{ 2924{
2925 struct mac_regs __iomem *regs = vptr->mac_regs; 2925 struct mac_regs __iomem *regs = vptr->mac_regs;
2926 enum speed_opt spd_dpx = vptr->options.spd_dpx;
2926 static u8 buf[256]; 2927 static u8 buf[256];
2927 int i; 2928 int i;
2928 2929
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
2968 2969
2969 writew(0x0FFF, &regs->WOLSRClr); 2970 writew(0x0FFF, &regs->WOLSRClr);
2970 2971
2972 if (spd_dpx == SPD_DPX_1000_FULL)
2973 goto mac_done;
2974
2975 if (spd_dpx != SPD_DPX_AUTO)
2976 goto advertise_done;
2977
2971 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { 2978 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2972 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 2979 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2973 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); 2980 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
2978 if (vptr->mii_status & VELOCITY_SPEED_1000) 2985 if (vptr->mii_status & VELOCITY_SPEED_1000)
2979 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); 2986 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2980 2987
2988advertise_done:
2981 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR); 2989 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2982 2990
2983 { 2991 {
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
2987 writeb(GCR, &regs->CHIPGCR); 2995 writeb(GCR, &regs->CHIPGCR);
2988 } 2996 }
2989 2997
2998mac_done:
2990 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR); 2999 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2991 /* Turn on SWPTAG just before entering power mode */ 3000 /* Turn on SWPTAG just before entering power mode */
2992 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW); 3001 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b9ff61..d7227539484e 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@ enum velocity_owner {
361#define MAC_REG_CHIPGSR 0x9C 361#define MAC_REG_CHIPGSR 0x9C
362#define MAC_REG_TESTCFG 0x9D 362#define MAC_REG_TESTCFG 0x9D
363#define MAC_REG_DEBUG 0x9E 363#define MAC_REG_DEBUG 0x9E
364#define MAC_REG_CHIPGCR 0x9F 364#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
365#define MAC_REG_WOLCR0_SET 0xA0 365#define MAC_REG_WOLCR0_SET 0xA0
366#define MAC_REG_WOLCR1_SET 0xA1 366#define MAC_REG_WOLCR1_SET 0xA1
367#define MAC_REG_PWCFG_SET 0xA2 367#define MAC_REG_PWCFG_SET 0xA2
@@ -848,10 +848,10 @@ enum velocity_owner {
848 * Bits in CHIPGCR register 848 * Bits in CHIPGCR register
849 */ 849 */
850 850
851#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */ 851#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
852#define CHIPGCR_FCFDX 0x40 852#define CHIPGCR_FCFDX 0x40 /* force full duplex */
853#define CHIPGCR_FCRESV 0x20 853#define CHIPGCR_FCRESV 0x20
854#define CHIPGCR_FCMODE 0x10 854#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
855#define CHIPGCR_LPSOPT 0x08 855#define CHIPGCR_LPSOPT 0x08
856#define CHIPGCR_TM1US 0x04 856#define CHIPGCR_TM1US 0x04
857#define CHIPGCR_TM0US 0x02 857#define CHIPGCR_TM0US 0x02
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 228d4f7a58af..e74e4b42592d 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
387 data1 = steer_ctrl = 0; 387 data1 = steer_ctrl = 0;
388 388
389 status = vxge_hw_vpath_fw_api(vpath, 389 status = vxge_hw_vpath_fw_api(vpath,
390 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 VXGE_HW_FW_API_GET_EPROM_REV, 390 VXGE_HW_FW_API_GET_EPROM_REV,
391 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
392 0, &data0, &data1, &steer_ctrl); 392 0, &data0, &data1, &steer_ctrl);
393 if (status != VXGE_HW_OK) 393 if (status != VXGE_HW_OK)
394 break; 394 break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2868 ring->rxd_init = attr->rxd_init; 2868 ring->rxd_init = attr->rxd_init;
2869 ring->rxd_term = attr->rxd_term; 2869 ring->rxd_term = attr->rxd_term;
2870 ring->buffer_mode = config->buffer_mode; 2870 ring->buffer_mode = config->buffer_mode;
2871 ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
2872 ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
2871 ring->rxds_limit = config->rxds_limit; 2873 ring->rxds_limit = config->rxds_limit;
2872 2874
2873 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); 2875 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
3511 3513
3512 /* apply "interrupts per txdl" attribute */ 3514 /* apply "interrupts per txdl" attribute */
3513 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; 3515 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
3516 fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
3517 fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
3514 3518
3515 if (fifo->config->intr) 3519 if (fifo->config->intr)
3516 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; 3520 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4377 } 4381 }
4378 4382
4379 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); 4383 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4384 vpath->tim_tti_cfg1_saved = val64;
4385
4380 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); 4386 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4381 4387
4382 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { 4388 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4433 } 4439 }
4434 4440
4435 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); 4441 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4442 vpath->tim_tti_cfg3_saved = val64;
4436 } 4443 }
4437 4444
4438 if (config->ring.enable == VXGE_HW_RING_ENABLE) { 4445 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4481 } 4488 }
4482 4489
4483 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); 4490 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4491 vpath->tim_rti_cfg1_saved = val64;
4492
4484 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); 4493 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4485 4494
4486 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { 4495 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4537 } 4546 }
4538 4547
4539 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); 4548 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4549 vpath->tim_rti_cfg3_saved = val64;
4540 } 4550 }
4541 4551
4542 val64 = 0; 4552 val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4555 return status; 4565 return status;
4556} 4566}
4557 4567
4558void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4559{
4560 struct __vxge_hw_virtualpath *vpath;
4561 struct vxge_hw_vpath_reg __iomem *vp_reg;
4562 struct vxge_hw_vp_config *config;
4563 u64 val64;
4564
4565 vpath = &hldev->virtual_paths[vp_id];
4566 vp_reg = vpath->vp_reg;
4567 config = vpath->vp_config;
4568
4569 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
4570 config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4571 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4572 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4573 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4574 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4575 }
4576}
4577
4578/* 4568/*
4579 * __vxge_hw_vpath_initialize 4569 * __vxge_hw_vpath_initialize
4580 * This routine is the final phase of init which initializes the 4570 * This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e288d160..3c53aa732c9d 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
682 u32 vsport_number; 682 u32 vsport_number;
683 u32 max_kdfc_db; 683 u32 max_kdfc_db;
684 u32 max_nofl_db; 684 u32 max_nofl_db;
685 u64 tim_tti_cfg1_saved;
686 u64 tim_tti_cfg3_saved;
687 u64 tim_rti_cfg1_saved;
688 u64 tim_rti_cfg3_saved;
685 689
686 struct __vxge_hw_ring *____cacheline_aligned ringh; 690 struct __vxge_hw_ring *____cacheline_aligned ringh;
687 struct __vxge_hw_fifo *____cacheline_aligned fifoh; 691 struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
921 u32 doorbell_cnt; 925 u32 doorbell_cnt;
922 u32 total_db_cnt; 926 u32 total_db_cnt;
923 u64 rxds_limit; 927 u64 rxds_limit;
928 u32 rtimer;
929 u64 tim_rti_cfg1_saved;
930 u64 tim_rti_cfg3_saved;
924 931
925 enum vxge_hw_status (*callback)( 932 enum vxge_hw_status (*callback)(
926 struct __vxge_hw_ring *ringh, 933 struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
1000 u32 per_txdl_space; 1007 u32 per_txdl_space;
1001 u32 vp_id; 1008 u32 vp_id;
1002 u32 tx_intr_num; 1009 u32 tx_intr_num;
1010 u32 rtimer;
1011 u64 tim_tti_cfg1_saved;
1012 u64 tim_tti_cfg3_saved;
1003 1013
1004 enum vxge_hw_status (*callback)( 1014 enum vxge_hw_status (*callback)(
1005 struct __vxge_hw_fifo *fifo_handle, 1015 struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a6512c683..395423aeec00 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
371 struct vxge_hw_ring_rxd_info ext_info; 371 struct vxge_hw_ring_rxd_info ext_info;
372 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 372 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
373 ring->ndev->name, __func__, __LINE__); 373 ring->ndev->name, __func__, __LINE__);
374 ring->pkts_processed = 0;
375
376 vxge_hw_ring_replenish(ringh);
377 374
378 do { 375 do {
379 prefetch((char *)dtr + L1_CACHE_BYTES); 376 prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1588 return ret; 1585 return ret;
1589} 1586}
1590 1587
1588/* Configure CI */
1589static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1590{
1591 int i = 0;
1592
1593 /* Enable CI for RTI */
1594 if (vdev->config.intr_type == MSI_X) {
1595 for (i = 0; i < vdev->no_of_vpath; i++) {
1596 struct __vxge_hw_ring *hw_ring;
1597
1598 hw_ring = vdev->vpaths[i].ring.handle;
1599 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1600 }
1601 }
1602
1603 /* Enable CI for TTI */
1604 for (i = 0; i < vdev->no_of_vpath; i++) {
1605 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1606 vxge_hw_vpath_tti_ci_set(hw_fifo);
1607 /*
1608 * For Inta (with or without napi), Set CI ON for only one
1609 * vpath. (Have only one free running timer).
1610 */
1611 if ((vdev->config.intr_type == INTA) && (i == 0))
1612 break;
1613 }
1614
1615 return;
1616}
1617
1591static int do_vxge_reset(struct vxgedev *vdev, int event) 1618static int do_vxge_reset(struct vxgedev *vdev, int event)
1592{ 1619{
1593 enum vxge_hw_status status; 1620 enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1753 netif_tx_wake_all_queues(vdev->ndev); 1780 netif_tx_wake_all_queues(vdev->ndev);
1754 } 1781 }
1755 1782
1783 /* configure CI */
1784 vxge_config_ci_for_tti_rti(vdev);
1785
1756out: 1786out:
1757 vxge_debug_entryexit(VXGE_TRACE, 1787 vxge_debug_entryexit(VXGE_TRACE,
1758 "%s:%d Exiting...", __func__, __LINE__); 1788 "%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
1793 */ 1823 */
1794static int vxge_poll_msix(struct napi_struct *napi, int budget) 1824static int vxge_poll_msix(struct napi_struct *napi, int budget)
1795{ 1825{
1796 struct vxge_ring *ring = 1826 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1797 container_of(napi, struct vxge_ring, napi); 1827 int pkts_processed;
1798 int budget_org = budget; 1828 int budget_org = budget;
1799 ring->budget = budget;
1800 1829
1830 ring->budget = budget;
1831 ring->pkts_processed = 0;
1801 vxge_hw_vpath_poll_rx(ring->handle); 1832 vxge_hw_vpath_poll_rx(ring->handle);
1833 pkts_processed = ring->pkts_processed;
1802 1834
1803 if (ring->pkts_processed < budget_org) { 1835 if (ring->pkts_processed < budget_org) {
1804 napi_complete(napi); 1836 napi_complete(napi);
1837
1805 /* Re enable the Rx interrupts for the vpath */ 1838 /* Re enable the Rx interrupts for the vpath */
1806 vxge_hw_channel_msix_unmask( 1839 vxge_hw_channel_msix_unmask(
1807 (struct __vxge_hw_channel *)ring->handle, 1840 (struct __vxge_hw_channel *)ring->handle,
1808 ring->rx_vector_no); 1841 ring->rx_vector_no);
1842 mmiowb();
1809 } 1843 }
1810 1844
1811 return ring->pkts_processed; 1845 /* We are copying and returning the local variable, in case if after
1846 * clearing the msix interrupt above, if the interrupt fires right
1847 * away which can preempt this NAPI thread */
1848 return pkts_processed;
1812} 1849}
1813 1850
1814static int vxge_poll_inta(struct napi_struct *napi, int budget) 1851static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1824 for (i = 0; i < vdev->no_of_vpath; i++) { 1861 for (i = 0; i < vdev->no_of_vpath; i++) {
1825 ring = &vdev->vpaths[i].ring; 1862 ring = &vdev->vpaths[i].ring;
1826 ring->budget = budget; 1863 ring->budget = budget;
1864 ring->pkts_processed = 0;
1827 vxge_hw_vpath_poll_rx(ring->handle); 1865 vxge_hw_vpath_poll_rx(ring->handle);
1828 pkts_processed += ring->pkts_processed; 1866 pkts_processed += ring->pkts_processed;
1829 budget -= ring->pkts_processed; 1867 budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2054 netdev_get_tx_queue(vdev->ndev, 0); 2092 netdev_get_tx_queue(vdev->ndev, 0);
2055 vpath->fifo.indicate_max_pkts = 2093 vpath->fifo.indicate_max_pkts =
2056 vdev->config.fifo_indicate_max_pkts; 2094 vdev->config.fifo_indicate_max_pkts;
2095 vpath->fifo.tx_vector_no = 0;
2057 vpath->ring.rx_vector_no = 0; 2096 vpath->ring.rx_vector_no = 0;
2058 vpath->ring.rx_csum = vdev->rx_csum; 2097 vpath->ring.rx_csum = vdev->rx_csum;
2059 vpath->ring.rx_hwts = vdev->rx_hwts; 2098 vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2079 return VXGE_HW_OK; 2118 return VXGE_HW_OK;
2080} 2119}
2081 2120
2121/**
2122 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2123 * if the interrupts are not within a range
2124 * @fifo: pointer to transmit fifo structure
2125 * Description: The function changes boundary timer and restriction timer
2126 * value depends on the traffic
2127 * Return Value: None
2128 */
2129static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2130{
2131 fifo->interrupt_count++;
2132 if (jiffies > fifo->jiffies + HZ / 100) {
2133 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2134
2135 fifo->jiffies = jiffies;
2136 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2137 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2138 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2139 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2140 } else if (hw_fifo->rtimer != 0) {
2141 hw_fifo->rtimer = 0;
2142 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2143 }
2144 fifo->interrupt_count = 0;
2145 }
2146}
2147
2148/**
2149 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2150 * if the interrupts are not within a range
2151 * @ring: pointer to receive ring structure
2152 * Description: The function increases of decreases the packet counts within
2153 * the ranges of traffic utilization, if the interrupts due to this ring are
2154 * not within a fixed range.
2155 * Return Value: Nothing
2156 */
2157static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2158{
2159 ring->interrupt_count++;
2160 if (jiffies > ring->jiffies + HZ / 100) {
2161 struct __vxge_hw_ring *hw_ring = ring->handle;
2162
2163 ring->jiffies = jiffies;
2164 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2165 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2166 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2167 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2168 } else if (hw_ring->rtimer != 0) {
2169 hw_ring->rtimer = 0;
2170 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2171 }
2172 ring->interrupt_count = 0;
2173 }
2174}
2175
2082/* 2176/*
2083 * vxge_isr_napi 2177 * vxge_isr_napi
2084 * @irq: the irq of the device. 2178 * @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2139 2233
2140#ifdef CONFIG_PCI_MSI 2234#ifdef CONFIG_PCI_MSI
2141 2235
2142static irqreturn_t 2236static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2143vxge_tx_msix_handle(int irq, void *dev_id)
2144{ 2237{
2145 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; 2238 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2146 2239
2240 adaptive_coalesce_tx_interrupts(fifo);
2241
2242 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2243 fifo->tx_vector_no);
2244
2245 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2246 fifo->tx_vector_no);
2247
2147 VXGE_COMPLETE_VPATH_TX(fifo); 2248 VXGE_COMPLETE_VPATH_TX(fifo);
2148 2249
2250 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2251 fifo->tx_vector_no);
2252
2253 mmiowb();
2254
2149 return IRQ_HANDLED; 2255 return IRQ_HANDLED;
2150} 2256}
2151 2257
2152static irqreturn_t 2258static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2153vxge_rx_msix_napi_handle(int irq, void *dev_id)
2154{ 2259{
2155 struct vxge_ring *ring = (struct vxge_ring *)dev_id; 2260 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2156 2261
2157 /* MSIX_IDX for Rx is 1 */ 2262 adaptive_coalesce_rx_interrupts(ring);
2263
2158 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, 2264 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2159 ring->rx_vector_no); 2265 ring->rx_vector_no);
2266
2267 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2268 ring->rx_vector_no);
2160 2269
2161 napi_schedule(&ring->napi); 2270 napi_schedule(&ring->napi);
2162 return IRQ_HANDLED; 2271 return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
2173 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; 2282 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2174 2283
2175 for (i = 0; i < vdev->no_of_vpath; i++) { 2284 for (i = 0; i < vdev->no_of_vpath; i++) {
2285 /* Reduce the chance of loosing alarm interrupts by masking
2286 * the vector. A pending bit will be set if an alarm is
2287 * generated and on unmask the interrupt will be fired.
2288 */
2176 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); 2289 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2290 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2291 mmiowb();
2177 2292
2178 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, 2293 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2179 vdev->exec_mode); 2294 vdev->exec_mode);
2180 if (status == VXGE_HW_OK) { 2295 if (status == VXGE_HW_OK) {
2181
2182 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, 2296 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2183 msix_id); 2297 msix_id);
2298 mmiowb();
2184 continue; 2299 continue;
2185 } 2300 }
2186 vxge_debug_intr(VXGE_ERR, 2301 vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
2299 vpath->ring.rx_vector_no = (vpath->device_id * 2414 vpath->ring.rx_vector_no = (vpath->device_id *
2300 VXGE_HW_VPATH_MSIX_ACTIVE) + 1; 2415 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2301 2416
2417 vpath->fifo.tx_vector_no = (vpath->device_id *
2418 VXGE_HW_VPATH_MSIX_ACTIVE);
2419
2302 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, 2420 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2303 VXGE_ALARM_MSIX_ID); 2421 VXGE_ALARM_MSIX_ID);
2304 } 2422 }
@@ -2474,8 +2592,9 @@ INTA_MODE:
2474 "%s:vxge:INTA", vdev->ndev->name); 2592 "%s:vxge:INTA", vdev->ndev->name);
2475 vxge_hw_device_set_intr_type(vdev->devh, 2593 vxge_hw_device_set_intr_type(vdev->devh,
2476 VXGE_HW_INTR_MODE_IRQLINE); 2594 VXGE_HW_INTR_MODE_IRQLINE);
2477 vxge_hw_vpath_tti_ci_set(vdev->devh, 2595
2478 vdev->vpaths[0].device_id); 2596 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2597
2479 ret = request_irq((int) vdev->pdev->irq, 2598 ret = request_irq((int) vdev->pdev->irq,
2480 vxge_isr_napi, 2599 vxge_isr_napi,
2481 IRQF_SHARED, vdev->desc[0], vdev); 2600 IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
2745 } 2864 }
2746 2865
2747 netif_tx_start_all_queues(vdev->ndev); 2866 netif_tx_start_all_queues(vdev->ndev);
2867
2868 /* configure CI */
2869 vxge_config_ci_for_tti_rti(vdev);
2870
2748 goto out0; 2871 goto out0;
2749 2872
2750out2: 2873out2:
@@ -3264,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = {
3264#endif 3387#endif
3265}; 3388};
3266 3389
3267static int __devinit vxge_device_revision(struct vxgedev *vdev)
3268{
3269 int ret;
3270 u8 revision;
3271
3272 ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
3273 if (ret)
3274 return -EIO;
3275
3276 vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
3277 return 0;
3278}
3279
3280static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3390static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3281 struct vxge_config *config, 3391 struct vxge_config *config,
3282 int high_dma, int no_of_vpath, 3392 int high_dma, int no_of_vpath,
@@ -3316,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3316 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3426 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3317 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3427 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3318 vdev->rx_hwts = 0; 3428 vdev->rx_hwts = 0;
3319 3429 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3320 ret = vxge_device_revision(vdev);
3321 if (ret < 0)
3322 goto _out1;
3323 3430
3324 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3431 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3325 3432
@@ -3348,7 +3455,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3348 vxge_debug_init(VXGE_ERR, 3455 vxge_debug_init(VXGE_ERR,
3349 "%s: vpath memory allocation failed", 3456 "%s: vpath memory allocation failed",
3350 vdev->ndev->name); 3457 vdev->ndev->name);
3351 ret = -ENODEV; 3458 ret = -ENOMEM;
3352 goto _out1; 3459 goto _out1;
3353 } 3460 }
3354 3461
@@ -3369,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3369 if (vdev->config.gro_enable) 3476 if (vdev->config.gro_enable)
3370 ndev->features |= NETIF_F_GRO; 3477 ndev->features |= NETIF_F_GRO;
3371 3478
3372 if (register_netdev(ndev)) { 3479 ret = register_netdev(ndev);
3480 if (ret) {
3373 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3481 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3374 "%s: %s : device registration failed!", 3482 "%s: %s : device registration failed!",
3375 ndev->name, __func__); 3483 ndev->name, __func__);
3376 ret = -ENODEV;
3377 goto _out2; 3484 goto _out2;
3378 } 3485 }
3379 3486
@@ -3444,6 +3551,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3444 /* in 2.6 will call stop() if device is up */ 3551 /* in 2.6 will call stop() if device is up */
3445 unregister_netdev(dev); 3552 unregister_netdev(dev);
3446 3553
3554 kfree(vdev->vpaths);
3555
3556 /* we are safe to free it now */
3557 free_netdev(dev);
3558
3447 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", 3559 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3448 buf); 3560 buf);
3449 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, 3561 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
@@ -3799,7 +3911,7 @@ static void __devinit vxge_device_config_init(
3799 break; 3911 break;
3800 3912
3801 case MSI_X: 3913 case MSI_X:
3802 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3914 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3803 break; 3915 break;
3804 } 3916 }
3805 3917
@@ -4335,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4335 goto _exit1; 4447 goto _exit1;
4336 } 4448 }
4337 4449
4338 if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) { 4450 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4451 if (ret) {
4339 vxge_debug_init(VXGE_ERR, 4452 vxge_debug_init(VXGE_ERR,
4340 "%s : request regions failed", __func__); 4453 "%s : request regions failed", __func__);
4341 ret = -ENODEV;
4342 goto _exit1; 4454 goto _exit1;
4343 } 4455 }
4344 4456
@@ -4446,7 +4558,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4446 if (!img[i].is_valid) 4558 if (!img[i].is_valid)
4447 break; 4559 break;
4448 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " 4560 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4449 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i, 4561 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4450 VXGE_EPROM_IMG_MAJOR(img[i].version), 4562 VXGE_EPROM_IMG_MAJOR(img[i].version),
4451 VXGE_EPROM_IMG_MINOR(img[i].version), 4563 VXGE_EPROM_IMG_MINOR(img[i].version),
4452 VXGE_EPROM_IMG_FIX(img[i].version), 4564 VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4755,9 @@ _exit6:
4643_exit5: 4755_exit5:
4644 vxge_device_unregister(hldev); 4756 vxge_device_unregister(hldev);
4645_exit4: 4757_exit4:
4646 pci_disable_sriov(pdev); 4758 pci_set_drvdata(pdev, NULL);
4647 vxge_hw_device_terminate(hldev); 4759 vxge_hw_device_terminate(hldev);
4760 pci_disable_sriov(pdev);
4648_exit3: 4761_exit3:
4649 iounmap(attr.bar0); 4762 iounmap(attr.bar0);
4650_exit2: 4763_exit2:
@@ -4655,7 +4768,7 @@ _exit0:
4655 kfree(ll_config); 4768 kfree(ll_config);
4656 kfree(device_config); 4769 kfree(device_config);
4657 driver_config->config_dev_cnt--; 4770 driver_config->config_dev_cnt--;
4658 pci_set_drvdata(pdev, NULL); 4771 driver_config->total_dev_cnt--;
4659 return ret; 4772 return ret;
4660} 4773}
4661 4774
@@ -4668,45 +4781,34 @@ _exit0:
4668static void __devexit vxge_remove(struct pci_dev *pdev) 4781static void __devexit vxge_remove(struct pci_dev *pdev)
4669{ 4782{
4670 struct __vxge_hw_device *hldev; 4783 struct __vxge_hw_device *hldev;
4671 struct vxgedev *vdev = NULL; 4784 struct vxgedev *vdev;
4672 struct net_device *dev; 4785 int i;
4673 int i = 0;
4674 4786
4675 hldev = pci_get_drvdata(pdev); 4787 hldev = pci_get_drvdata(pdev);
4676
4677 if (hldev == NULL) 4788 if (hldev == NULL)
4678 return; 4789 return;
4679 4790
4680 dev = hldev->ndev; 4791 vdev = netdev_priv(hldev->ndev);
4681 vdev = netdev_priv(dev);
4682 4792
4683 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); 4793 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4684
4685 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", 4794 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4686 __func__); 4795 __func__);
4687 vxge_device_unregister(hldev);
4688 4796
4689 for (i = 0; i < vdev->no_of_vpath; i++) { 4797 for (i = 0; i < vdev->no_of_vpath; i++)
4690 vxge_free_mac_add_list(&vdev->vpaths[i]); 4798 vxge_free_mac_add_list(&vdev->vpaths[i]);
4691 vdev->vpaths[i].mcast_addr_cnt = 0;
4692 vdev->vpaths[i].mac_addr_cnt = 0;
4693 }
4694
4695 kfree(vdev->vpaths);
4696 4799
4800 vxge_device_unregister(hldev);
4801 pci_set_drvdata(pdev, NULL);
4802 /* Do not call pci_disable_sriov here, as it will break child devices */
4803 vxge_hw_device_terminate(hldev);
4697 iounmap(vdev->bar0); 4804 iounmap(vdev->bar0);
4698 4805 pci_release_region(pdev, 0);
4699 /* we are safe to free it now */ 4806 pci_disable_device(pdev);
4700 free_netdev(dev); 4807 driver_config->config_dev_cnt--;
4808 driver_config->total_dev_cnt--;
4701 4809
4702 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", 4810 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4703 __func__, __LINE__); 4811 __func__, __LINE__);
4704
4705 vxge_hw_device_terminate(hldev);
4706
4707 pci_disable_device(pdev);
4708 pci_release_region(pdev, 0);
4709 pci_set_drvdata(pdev, NULL);
4710 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, 4812 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4711 __LINE__); 4813 __LINE__);
4712} 4814}
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fedc356f..40474f0da576 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
59#define VXGE_TTI_LTIMER_VAL 1000 59#define VXGE_TTI_LTIMER_VAL 1000
60#define VXGE_T1A_TTI_LTIMER_VAL 80 60#define VXGE_T1A_TTI_LTIMER_VAL 80
61#define VXGE_TTI_RTIMER_VAL 0 61#define VXGE_TTI_RTIMER_VAL 0
62#define VXGE_TTI_RTIMER_ADAPT_VAL 10
62#define VXGE_T1A_TTI_RTIMER_VAL 400 63#define VXGE_T1A_TTI_RTIMER_VAL 400
63#define VXGE_RTI_BTIMER_VAL 250 64#define VXGE_RTI_BTIMER_VAL 250
64#define VXGE_RTI_LTIMER_VAL 100 65#define VXGE_RTI_LTIMER_VAL 100
65#define VXGE_RTI_RTIMER_VAL 0 66#define VXGE_RTI_RTIMER_VAL 0
66#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH 67#define VXGE_RTI_RTIMER_ADAPT_VAL 15
68#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
67#define VXGE_ISR_POLLING_CNT 8 69#define VXGE_ISR_POLLING_CNT 8
68#define VXGE_MAX_CONFIG_DEV 0xFF 70#define VXGE_MAX_CONFIG_DEV 0xFF
69#define VXGE_EXEC_MODE_DISABLE 0 71#define VXGE_EXEC_MODE_DISABLE 0
@@ -107,6 +109,14 @@
107#define RTI_T1A_RX_UFC_C 50 109#define RTI_T1A_RX_UFC_C 50
108#define RTI_T1A_RX_UFC_D 60 110#define RTI_T1A_RX_UFC_D 60
109 111
112/*
113 * The interrupt rate is maintained at 3k per second with the moderation
114 * parameters for most traffic but not all. This is the maximum interrupt
115 * count allowed per function with INTA or per vector in the case of
116 * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A.
117 */
118#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
119#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
110 120
111/* Milli secs timer period */ 121/* Milli secs timer period */
112#define VXGE_TIMER_DELAY 10000 122#define VXGE_TIMER_DELAY 10000
@@ -247,6 +257,11 @@ struct vxge_fifo {
247 int tx_steering_type; 257 int tx_steering_type;
248 int indicate_max_pkts; 258 int indicate_max_pkts;
249 259
260 /* Adaptive interrupt moderation parameters used in T1A */
261 unsigned long interrupt_count;
262 unsigned long jiffies;
263
264 u32 tx_vector_no;
250 /* Tx stats */ 265 /* Tx stats */
251 struct vxge_fifo_stats stats; 266 struct vxge_fifo_stats stats;
252} ____cacheline_aligned; 267} ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
271 */ 286 */
272 int driver_id; 287 int driver_id;
273 288
289 /* Adaptive interrupt moderation parameters used in T1A */
290 unsigned long interrupt_count;
291 unsigned long jiffies;
292
274 /* copy of the flag indicating whether rx_csum is to be used */ 293 /* copy of the flag indicating whether rx_csum is to be used */
275 u32 rx_csum:1, 294 u32 rx_csum:1,
276 rx_hwts:1; 295 rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
286 305
287 int vlan_tag_strip; 306 int vlan_tag_strip;
288 struct vlan_group *vlgrp; 307 struct vlan_group *vlgrp;
289 int rx_vector_no; 308 u32 rx_vector_no;
290 enum vxge_hw_status last_status; 309 enum vxge_hw_status last_status;
291 310
292 /* Rx stats */ 311 /* Rx stats */
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c4075f..8674f331311c 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@ exit:
218 return status; 218 return status;
219} 219}
220 220
221void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
222{
223 struct vxge_hw_vpath_reg __iomem *vp_reg;
224 struct vxge_hw_vp_config *config;
225 u64 val64;
226
227 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
228 return;
229
230 vp_reg = fifo->vp_reg;
231 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
232
233 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
234 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
235 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
236 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
237 fifo->tim_tti_cfg1_saved = val64;
238 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
239 }
240}
241
242void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
243{
244 u64 val64 = ring->tim_rti_cfg1_saved;
245
246 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
247 ring->tim_rti_cfg1_saved = val64;
248 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
249}
250
251void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
252{
253 u64 val64 = fifo->tim_tti_cfg3_saved;
254 u64 timer = (fifo->rtimer * 1000) / 272;
255
256 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
257 if (timer)
258 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
259 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
260
261 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
262 /* tti_cfg3_saved is not updated again because it is
263 * initialized at one place only - init time.
264 */
265}
266
267void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
268{
269 u64 val64 = ring->tim_rti_cfg3_saved;
270 u64 timer = (ring->rtimer * 1000) / 272;
271
272 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
273 if (timer)
274 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
275 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
276
277 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
278 /* rti_cfg3_saved is not updated again because it is
279 * initialized at one place only - init time.
280 */
281}
282
221/** 283/**
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector. 284 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channeh: Channel for rx or tx handle 285 * @channeh: Channel for rx or tx handle
@@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
254} 316}
255 317
256/** 318/**
319 * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
320 * @channel: Channel for rx or tx handle
321 * @msix_id: MSI ID
322 *
323 * The function unmasks the msix interrupt for the given msix_id
324 * if configured in MSIX oneshot mode
325 *
326 * Returns: 0
327 */
328void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
329{
330 __vxge_hw_pio_mem_write32_upper(
331 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
332 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
333}
334
335/**
257 * vxge_hw_device_set_intr_type - Updates the configuration 336 * vxge_hw_device_set_intr_type - Updates the configuration
258 * with new interrupt type. 337 * with new interrupt type.
259 * @hldev: HW device handle. 338 * @hldev: HW device handle.
@@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2191 if (vpath->hldev->config.intr_mode == 2270 if (vpath->hldev->config.intr_mode ==
2192 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { 2271 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2193 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( 2272 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2273 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2274 0, 32), &vp_reg->one_shot_vect0_en);
2275 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2194 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, 2276 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2195 0, 32), &vp_reg->one_shot_vect1_en); 2277 0, 32), &vp_reg->one_shot_vect1_en);
2196 }
2197
2198 if (vpath->hldev->config.intr_mode ==
2199 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2200 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( 2278 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2201 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, 2279 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2202 0, 32), &vp_reg->one_shot_vect2_en); 2280 0, 32), &vp_reg->one_shot_vect2_en);
2203
2204 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2205 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2206 0, 32), &vp_reg->one_shot_vect3_en);
2207 } 2281 }
2208} 2282}
2209 2283
@@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2229} 2303}
2230 2304
2231/** 2305/**
2306 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2307 * @vp: Virtual Path handle.
2308 * @msix_id: MSI ID
2309 *
2310 * The function clears the msix interrupt for the given msix_id
2311 *
2312 * Returns: 0,
2313 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2314 * status.
2315 * See also:
2316 */
2317void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2318{
2319 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2320
2321 if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
2322 __vxge_hw_pio_mem_write32_upper(
2323 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2324 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2325 else
2326 __vxge_hw_pio_mem_write32_upper(
2327 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2328 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2329}
2330
2331/**
2232 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. 2332 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2233 * @vp: Virtual Path handle. 2333 * @vp: Virtual Path handle.
2234 * @msix_id: MSI ID 2334 * @msix_id: MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d6afa1..9d9dfda4c7ab 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
2142 * Virtual Paths 2142 * Virtual Paths
2143 */ 2143 */
2144 2144
2145void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
2146
2147void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
2148
2145u32 vxge_hw_vpath_id( 2149u32 vxge_hw_vpath_id(
2146 struct __vxge_hw_vpath_handle *vpath_handle); 2150 struct __vxge_hw_vpath_handle *vpath_handle);
2147 2151
@@ -2245,6 +2249,8 @@ void
2245vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, 2249vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2246 int msix_id); 2250 int msix_id);
2247 2251
2252void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
2253
2248void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); 2254void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2249 2255
2250void 2256void
@@ -2270,6 +2276,9 @@ void
2270vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); 2276vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2271 2277
2272void 2278void
2279vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
2280
2281void
2273vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, 2282vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2274 void **dtrh); 2283 void **dtrh);
2275 2284
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2282int 2291int
2283vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); 2292vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2284 2293
2285void 2294void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
2286vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); 2295
2296void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
2287 2297
2288#endif 2298#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index ad2f99b9bcf3..581e21525e85 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
16 16
17#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "5" 18#define VXGE_VERSION_MINOR "5"
19#define VXGE_VERSION_FIX "1" 19#define VXGE_VERSION_FIX "2"
20#define VXGE_VERSION_BUILD "22082" 20#define VXGE_VERSION_BUILD "22259"
21#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
22 22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) 23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index b4338f389394..7aeb113cbb90 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -274,6 +274,7 @@ source "drivers/net/wireless/b43legacy/Kconfig"
274source "drivers/net/wireless/hostap/Kconfig" 274source "drivers/net/wireless/hostap/Kconfig"
275source "drivers/net/wireless/ipw2x00/Kconfig" 275source "drivers/net/wireless/ipw2x00/Kconfig"
276source "drivers/net/wireless/iwlwifi/Kconfig" 276source "drivers/net/wireless/iwlwifi/Kconfig"
277source "drivers/net/wireless/iwlegacy/Kconfig"
277source "drivers/net/wireless/iwmc3200wifi/Kconfig" 278source "drivers/net/wireless/iwmc3200wifi/Kconfig"
278source "drivers/net/wireless/libertas/Kconfig" 279source "drivers/net/wireless/libertas/Kconfig"
279source "drivers/net/wireless/orinoco/Kconfig" 280source "drivers/net/wireless/orinoco/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 9760561a27a5..ddd3fb6ba1d3 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_B43LEGACY) += b43legacy/
24obj-$(CONFIG_ZD1211RW) += zd1211rw/ 24obj-$(CONFIG_ZD1211RW) += zd1211rw/
25obj-$(CONFIG_RTL8180) += rtl818x/ 25obj-$(CONFIG_RTL8180) += rtl818x/
26obj-$(CONFIG_RTL8187) += rtl818x/ 26obj-$(CONFIG_RTL8187) += rtl818x/
27obj-$(CONFIG_RTL8192CE) += rtlwifi/ 27obj-$(CONFIG_RTLWIFI) += rtlwifi/
28 28
29# 16-bit wireless PCMCIA client drivers 29# 16-bit wireless PCMCIA client drivers
30obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o 30obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
@@ -41,7 +41,8 @@ obj-$(CONFIG_ADM8211) += adm8211.o
41 41
42obj-$(CONFIG_MWL8K) += mwl8k.o 42obj-$(CONFIG_MWL8K) += mwl8k.o
43 43
44obj-$(CONFIG_IWLWIFI) += iwlwifi/ 44obj-$(CONFIG_IWLAGN) += iwlwifi/
45obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
45obj-$(CONFIG_RT2X00) += rt2x00/ 46obj-$(CONFIG_RT2X00) += rt2x00/
46 47
47obj-$(CONFIG_P54_COMMON) += p54/ 48obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9aa1bc0a947..afe2cbc6cb24 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1658,7 +1658,7 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1658} 1658}
1659 1659
1660/* Put adm8211_tx_hdr on skb and transmit */ 1660/* Put adm8211_tx_hdr on skb and transmit */
1661static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 1661static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1662{ 1662{
1663 struct adm8211_tx_hdr *txhdr; 1663 struct adm8211_tx_hdr *txhdr;
1664 size_t payload_len, hdrlen; 1664 size_t payload_len, hdrlen;
@@ -1707,8 +1707,6 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1707 txhdr->retry_limit = info->control.rates[0].count; 1707 txhdr->retry_limit = info->control.rates[0].count;
1708 1708
1709 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); 1709 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
1710
1711 return NETDEV_TX_OK;
1712} 1710}
1713 1711
1714static int adm8211_alloc_rings(struct ieee80211_hw *dev) 1712static int adm8211_alloc_rings(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 1476314afa8a..298601436ee2 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1728,7 +1728,7 @@ static void at76_mac80211_tx_callback(struct urb *urb)
1728 ieee80211_wake_queues(priv->hw); 1728 ieee80211_wake_queues(priv->hw);
1729} 1729}
1730 1730
1731static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1731static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1732{ 1732{
1733 struct at76_priv *priv = hw->priv; 1733 struct at76_priv *priv = hw->priv;
1734 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; 1734 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
@@ -1741,7 +1741,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1741 if (priv->tx_urb->status == -EINPROGRESS) { 1741 if (priv->tx_urb->status == -EINPROGRESS) {
1742 wiphy_err(priv->hw->wiphy, 1742 wiphy_err(priv->hw->wiphy,
1743 "%s called while tx urb is pending\n", __func__); 1743 "%s called while tx urb is pending\n", __func__);
1744 return NETDEV_TX_BUSY; 1744 dev_kfree_skb_any(skb);
1745 return;
1745 } 1746 }
1746 1747
1747 /* The following code lines are important when the device is going to 1748 /* The following code lines are important when the device is going to
@@ -1755,7 +1756,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1755 if (compare_ether_addr(priv->bssid, mgmt->bssid)) { 1756 if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
1756 memcpy(priv->bssid, mgmt->bssid, ETH_ALEN); 1757 memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
1757 ieee80211_queue_work(hw, &priv->work_join_bssid); 1758 ieee80211_queue_work(hw, &priv->work_join_bssid);
1758 return NETDEV_TX_BUSY; 1759 dev_kfree_skb_any(skb);
1760 return;
1759 } 1761 }
1760 } 1762 }
1761 1763
@@ -1795,8 +1797,6 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1795 priv->tx_urb, 1797 priv->tx_urb,
1796 priv->tx_urb->hcpriv, priv->tx_urb->complete); 1798 priv->tx_urb->hcpriv, priv->tx_urb->complete);
1797 } 1799 }
1798
1799 return 0;
1800} 1800}
1801 1801
1802static int at76_mac80211_start(struct ieee80211_hw *hw) 1802static int at76_mac80211_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 4a37447dfc01..f14a65473fe8 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -290,7 +290,7 @@ struct mib_mac_mgmt {
290 u8 res; 290 u8 res;
291 u8 multi_domain_capability_implemented; 291 u8 multi_domain_capability_implemented;
292 u8 multi_domain_capability_enabled; 292 u8 multi_domain_capability_enabled;
293 u8 country_string[3]; 293 u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
294 u8 reserved[3]; 294 u8 reserved[3];
295} __packed; 295} __packed;
296 296
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
index d7a4799d20fb..7b9672b0d090 100644
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ b/drivers/net/wireless/ath/ar9170/Kconfig
@@ -1,8 +1,10 @@
1config AR9170_USB 1config AR9170_USB
2 tristate "Atheros AR9170 802.11n USB support" 2 tristate "Atheros AR9170 802.11n USB support (OBSOLETE)"
3 depends on USB && MAC80211 3 depends on USB && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 help 5 help
6 This driver is going to get replaced by carl9170.
7
6 This is a driver for the Atheros "otus" 802.11n USB devices. 8 This is a driver for the Atheros "otus" 802.11n USB devices.
7 9
8 These devices require additional firmware (2 files). 10 These devices require additional firmware (2 files).
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 4f845f80c098..371e4ce49528 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -224,7 +224,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
224int ar9170_nag_limiter(struct ar9170 *ar); 224int ar9170_nag_limiter(struct ar9170 *ar);
225 225
226/* MAC */ 226/* MAC */
227int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 227void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
228int ar9170_init_mac(struct ar9170 *ar); 228int ar9170_init_mac(struct ar9170 *ar);
229int ar9170_set_qos(struct ar9170 *ar); 229int ar9170_set_qos(struct ar9170 *ar);
230int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast); 230int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 32bf79e6a320..b761fec0d721 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1475,7 +1475,7 @@ static void ar9170_tx(struct ar9170 *ar)
1475 msecs_to_jiffies(AR9170_JANITOR_DELAY)); 1475 msecs_to_jiffies(AR9170_JANITOR_DELAY));
1476} 1476}
1477 1477
1478int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1478void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1479{ 1479{
1480 struct ar9170 *ar = hw->priv; 1480 struct ar9170 *ar = hw->priv;
1481 struct ieee80211_tx_info *info; 1481 struct ieee80211_tx_info *info;
@@ -1493,11 +1493,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1493 skb_queue_tail(&ar->tx_pending[queue], skb); 1493 skb_queue_tail(&ar->tx_pending[queue], skb);
1494 1494
1495 ar9170_tx(ar); 1495 ar9170_tx(ar);
1496 return NETDEV_TX_OK; 1496 return;
1497 1497
1498err_free: 1498err_free:
1499 dev_kfree_skb_any(skb); 1499 dev_kfree_skb_any(skb);
1500 return NETDEV_TX_OK;
1501} 1500}
1502 1501
1503static int ar9170_op_add_interface(struct ieee80211_hw *hw, 1502static int ar9170_op_add_interface(struct ieee80211_hw *hw,
@@ -1945,7 +1944,8 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1945static int ar9170_ampdu_action(struct ieee80211_hw *hw, 1944static int ar9170_ampdu_action(struct ieee80211_hw *hw,
1946 struct ieee80211_vif *vif, 1945 struct ieee80211_vif *vif,
1947 enum ieee80211_ampdu_mlme_action action, 1946 enum ieee80211_ampdu_mlme_action action,
1948 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 1947 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
1948 u8 buf_size)
1949{ 1949{
1950 switch (action) { 1950 switch (action) {
1951 case IEEE80211_AMPDU_RX_START: 1951 case IEEE80211_AMPDU_RX_START:
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e43210c8585c..a6c6a466000f 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -108,12 +108,14 @@ enum ath_cipher {
108 * struct ath_ops - Register read/write operations 108 * struct ath_ops - Register read/write operations
109 * 109 *
110 * @read: Register read 110 * @read: Register read
111 * @multi_read: Multiple register read
111 * @write: Register write 112 * @write: Register write
112 * @enable_write_buffer: Enable multiple register writes 113 * @enable_write_buffer: Enable multiple register writes
113 * @write_flush: flush buffered register writes and disable buffering 114 * @write_flush: flush buffered register writes and disable buffering
114 */ 115 */
115struct ath_ops { 116struct ath_ops {
116 unsigned int (*read)(void *, u32 reg_offset); 117 unsigned int (*read)(void *, u32 reg_offset);
118 void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
117 void (*write)(void *, u32 val, u32 reg_offset); 119 void (*write)(void *, u32 val, u32 reg_offset);
118 void (*enable_write_buffer)(void *); 120 void (*enable_write_buffer)(void *);
119 void (*write_flush) (void *); 121 void (*write_flush) (void *);
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index e0793319389d..e18a9aa7b6ca 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -40,6 +40,17 @@ config ATH5K_DEBUG
40 40
41 modprobe ath5k debug=0x00000400 41 modprobe ath5k debug=0x00000400
42 42
43config ATH5K_TRACER
44 bool "Atheros 5xxx tracer"
45 depends on ATH5K
46 depends on EVENT_TRACING
47 ---help---
48 Say Y here to enable tracepoints for the ath5k driver
49 using the kernel tracing infrastructure. Select this
50 option if you are interested in debugging the driver.
51
52 If unsure, say N.
53
43config ATH5K_AHB 54config ATH5K_AHB
44 bool "Atheros 5xxx AHB bus support" 55 bool "Atheros 5xxx AHB bus support"
45 depends on (ATHEROS_AR231X && !PCI) 56 depends on (ATHEROS_AR231X && !PCI)
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 707cde149248..82324e98efef 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -31,7 +31,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
31 *csz = L1_CACHE_BYTES >> 2; 31 *csz = L1_CACHE_BYTES >> 2;
32} 32}
33 33
34bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) 34static bool
35ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
35{ 36{
36 struct ath5k_softc *sc = common->priv; 37 struct ath5k_softc *sc = common->priv;
37 struct platform_device *pdev = to_platform_device(sc->dev); 38 struct platform_device *pdev = to_platform_device(sc->dev);
@@ -46,10 +47,10 @@ bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
46 47
47 eeprom += off; 48 eeprom += off;
48 if (eeprom > eeprom_end) 49 if (eeprom > eeprom_end)
49 return -EINVAL; 50 return false;
50 51
51 *data = *eeprom; 52 *data = *eeprom;
52 return 0; 53 return true;
53} 54}
54 55
55int ath5k_hw_read_srev(struct ath5k_hw *ah) 56int ath5k_hw_read_srev(struct ath5k_hw *ah)
@@ -92,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
92 goto err_out; 93 goto err_out;
93 } 94 }
94 95
95 mem = ioremap_nocache(res->start, res->end - res->start + 1); 96 mem = ioremap_nocache(res->start, resource_size(res));
96 if (mem == NULL) { 97 if (mem == NULL) {
97 dev_err(&pdev->dev, "ioremap failed\n"); 98 dev_err(&pdev->dev, "ioremap failed\n");
98 ret = -ENOMEM; 99 ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 407e39c2b10b..8a06dbd39629 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -210,14 +210,9 @@
210/* Initial values */ 210/* Initial values */
211#define AR5K_INIT_CYCRSSI_THR1 2 211#define AR5K_INIT_CYCRSSI_THR1 2
212 212
213/* Tx retry limits */ 213/* Tx retry limit defaults from standard */
214#define AR5K_INIT_SH_RETRY 10 214#define AR5K_INIT_RETRY_SHORT 7
215#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY 215#define AR5K_INIT_RETRY_LONG 4
216/* For station mode */
217#define AR5K_INIT_SSH_RETRY 32
218#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
219#define AR5K_INIT_TX_RETRY 10
220
221 216
222/* Slot time */ 217/* Slot time */
223#define AR5K_INIT_SLOT_TIME_TURBO 6 218#define AR5K_INIT_SLOT_TIME_TURBO 6
@@ -518,7 +513,7 @@ enum ath5k_tx_queue_id {
518 AR5K_TX_QUEUE_ID_NOQCU_DATA = 0, 513 AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
519 AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1, 514 AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
520 AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/ 515 AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
521 AR5K_TX_QUEUE_ID_DATA_MAX = 4, /*IEEE80211_TX_QUEUE_DATA4*/ 516 AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
522 AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/ 517 AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
523 AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/ 518 AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
524 AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/ 519 AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
@@ -1057,7 +1052,9 @@ struct ath5k_hw {
1057#define ah_modes ah_capabilities.cap_mode 1052#define ah_modes ah_capabilities.cap_mode
1058#define ah_ee_version ah_capabilities.cap_eeprom.ee_version 1053#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
1059 1054
1060 u32 ah_limit_tx_retries; 1055 u8 ah_retry_long;
1056 u8 ah_retry_short;
1057
1061 u8 ah_coverage_class; 1058 u8 ah_coverage_class;
1062 bool ah_ack_bitrate_high; 1059 bool ah_ack_bitrate_high;
1063 u8 ah_bwmode; 1060 u8 ah_bwmode;
@@ -1067,7 +1064,6 @@ struct ath5k_hw {
1067 u8 ah_ant_mode; 1064 u8 ah_ant_mode;
1068 u8 ah_tx_ant; 1065 u8 ah_tx_ant;
1069 u8 ah_def_ant; 1066 u8 ah_def_ant;
1070 bool ah_software_retry;
1071 1067
1072 struct ath5k_capabilities ah_capabilities; 1068 struct ath5k_capabilities ah_capabilities;
1073 1069
@@ -1162,6 +1158,26 @@ void ath5k_hw_deinit(struct ath5k_hw *ah);
1162int ath5k_sysfs_register(struct ath5k_softc *sc); 1158int ath5k_sysfs_register(struct ath5k_softc *sc);
1163void ath5k_sysfs_unregister(struct ath5k_softc *sc); 1159void ath5k_sysfs_unregister(struct ath5k_softc *sc);
1164 1160
1161/* base.c */
1162struct ath5k_buf;
1163struct ath5k_txq;
1164
1165void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
1166bool ath_any_vif_assoc(struct ath5k_softc *sc);
1167void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1168 struct ath5k_txq *txq);
1169int ath5k_init_hw(struct ath5k_softc *sc);
1170int ath5k_stop_hw(struct ath5k_softc *sc);
1171void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
1172void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
1173 struct ieee80211_vif *vif);
1174int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
1175void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
1176int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1177void ath5k_beacon_config(struct ath5k_softc *sc);
1178void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
1179void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
1180
1165/*Chip id helper functions */ 1181/*Chip id helper functions */
1166const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val); 1182const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
1167int ath5k_hw_read_srev(struct ath5k_hw *ah); 1183int ath5k_hw_read_srev(struct ath5k_hw *ah);
@@ -1250,6 +1266,8 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
1250int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, 1266int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
1251 enum ath5k_tx_queue queue_type, 1267 enum ath5k_tx_queue queue_type,
1252 struct ath5k_txq_info *queue_info); 1268 struct ath5k_txq_info *queue_info);
1269void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
1270 unsigned int queue);
1253u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue); 1271u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1254void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1272void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1255int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1273int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index cdac5cff0177..bc8240560488 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -118,8 +118,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
118 ah->ah_bwmode = AR5K_BWMODE_DEFAULT; 118 ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
120 ah->ah_imr = 0; 120 ah->ah_imr = 0;
121 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; 121 ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
122 ah->ah_software_retry = false; 122 ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
123 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT; 123 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
124 ah->ah_noise_floor = -95; /* until first NF calibration is run */ 124 ah->ah_noise_floor = -95; /* until first NF calibration is run */
125 sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO; 125 sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
@@ -220,7 +220,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
220 ah->ah_radio = AR5K_RF5112; 220 ah->ah_radio = AR5K_RF5112;
221 ah->ah_single_chip = false; 221 ah->ah_single_chip = false;
222 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B; 222 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
223 } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) { 223 } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
224 ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
224 ah->ah_radio = AR5K_RF2316; 225 ah->ah_radio = AR5K_RF2316;
225 ah->ah_single_chip = true; 226 ah->ah_single_chip = true;
226 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316; 227 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 09ae4ef0fd51..4d7f21ee111c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -61,6 +61,9 @@
61#include "debug.h" 61#include "debug.h"
62#include "ani.h" 62#include "ani.h"
63 63
64#define CREATE_TRACE_POINTS
65#include "trace.h"
66
64int ath5k_modparam_nohwcrypt; 67int ath5k_modparam_nohwcrypt;
65module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO); 68module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
66MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 69MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -242,73 +245,68 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
242\********************/ 245\********************/
243 246
244/* 247/*
245 * Convert IEEE channel number to MHz frequency.
246 */
247static inline short
248ath5k_ieee2mhz(short chan)
249{
250 if (chan <= 14 || chan >= 27)
251 return ieee80211chan2mhz(chan);
252 else
253 return 2212 + chan * 20;
254}
255
256/*
257 * Returns true for the channel numbers used without all_channels modparam. 248 * Returns true for the channel numbers used without all_channels modparam.
258 */ 249 */
259static bool ath5k_is_standard_channel(short chan) 250static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
260{ 251{
261 return ((chan <= 14) || 252 if (band == IEEE80211_BAND_2GHZ && chan <= 14)
262 /* UNII 1,2 */ 253 return true;
263 ((chan & 3) == 0 && chan >= 36 && chan <= 64) || 254
255 return /* UNII 1,2 */
256 (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
264 /* midband */ 257 /* midband */
265 ((chan & 3) == 0 && chan >= 100 && chan <= 140) || 258 ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
266 /* UNII-3 */ 259 /* UNII-3 */
267 ((chan & 3) == 1 && chan >= 149 && chan <= 165)); 260 ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
261 /* 802.11j 5.030-5.080 GHz (20MHz) */
262 (chan == 8 || chan == 12 || chan == 16) ||
263 /* 802.11j 4.9GHz (20MHz) */
264 (chan == 184 || chan == 188 || chan == 192 || chan == 196));
268} 265}
269 266
270static unsigned int 267static unsigned int
271ath5k_copy_channels(struct ath5k_hw *ah, 268ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
272 struct ieee80211_channel *channels, 269 unsigned int mode, unsigned int max)
273 unsigned int mode,
274 unsigned int max)
275{ 270{
276 unsigned int i, count, size, chfreq, freq, ch; 271 unsigned int count, size, chfreq, freq, ch;
277 272 enum ieee80211_band band;
278 if (!test_bit(mode, ah->ah_modes))
279 return 0;
280 273
281 switch (mode) { 274 switch (mode) {
282 case AR5K_MODE_11A: 275 case AR5K_MODE_11A:
283 /* 1..220, but 2GHz frequencies are filtered by check_channel */ 276 /* 1..220, but 2GHz frequencies are filtered by check_channel */
284 size = 220 ; 277 size = 220;
285 chfreq = CHANNEL_5GHZ; 278 chfreq = CHANNEL_5GHZ;
279 band = IEEE80211_BAND_5GHZ;
286 break; 280 break;
287 case AR5K_MODE_11B: 281 case AR5K_MODE_11B:
288 case AR5K_MODE_11G: 282 case AR5K_MODE_11G:
289 size = 26; 283 size = 26;
290 chfreq = CHANNEL_2GHZ; 284 chfreq = CHANNEL_2GHZ;
285 band = IEEE80211_BAND_2GHZ;
291 break; 286 break;
292 default: 287 default:
293 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); 288 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
294 return 0; 289 return 0;
295 } 290 }
296 291
297 for (i = 0, count = 0; i < size && max > 0; i++) { 292 count = 0;
298 ch = i + 1 ; 293 for (ch = 1; ch <= size && count < max; ch++) {
299 freq = ath5k_ieee2mhz(ch); 294 freq = ieee80211_channel_to_frequency(ch, band);
295
296 if (freq == 0) /* mapping failed - not a standard channel */
297 continue;
300 298
301 /* Check if channel is supported by the chipset */ 299 /* Check if channel is supported by the chipset */
302 if (!ath5k_channel_ok(ah, freq, chfreq)) 300 if (!ath5k_channel_ok(ah, freq, chfreq))
303 continue; 301 continue;
304 302
305 if (!modparam_all_channels && !ath5k_is_standard_channel(ch)) 303 if (!modparam_all_channels &&
304 !ath5k_is_standard_channel(ch, band))
306 continue; 305 continue;
307 306
308 /* Write channel info and increment counter */ 307 /* Write channel info and increment counter */
309 channels[count].center_freq = freq; 308 channels[count].center_freq = freq;
310 channels[count].band = (chfreq == CHANNEL_2GHZ) ? 309 channels[count].band = band;
311 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
312 switch (mode) { 310 switch (mode) {
313 case AR5K_MODE_11A: 311 case AR5K_MODE_11A:
314 case AR5K_MODE_11G: 312 case AR5K_MODE_11G:
@@ -319,7 +317,6 @@ ath5k_copy_channels(struct ath5k_hw *ah,
319 } 317 }
320 318
321 count++; 319 count++;
322 max--;
323 } 320 }
324 321
325 return count; 322 return count;
@@ -364,7 +361,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
364 sband->n_bitrates = 12; 361 sband->n_bitrates = 12;
365 362
366 sband->channels = sc->channels; 363 sband->channels = sc->channels;
367 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 364 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
368 AR5K_MODE_11G, max_c); 365 AR5K_MODE_11G, max_c);
369 366
370 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 367 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -390,7 +387,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
390 } 387 }
391 388
392 sband->channels = sc->channels; 389 sband->channels = sc->channels;
393 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 390 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
394 AR5K_MODE_11B, max_c); 391 AR5K_MODE_11B, max_c);
395 392
396 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 393 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -410,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
410 sband->n_bitrates = 8; 407 sband->n_bitrates = 8;
411 408
412 sband->channels = &sc->channels[count_c]; 409 sband->channels = &sc->channels[count_c];
413 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 410 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
414 AR5K_MODE_11A, max_c); 411 AR5K_MODE_11A, max_c);
415 412
416 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; 413 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
@@ -445,31 +442,9 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
445 return ath5k_reset(sc, chan, true); 442 return ath5k_reset(sc, chan, true);
446} 443}
447 444
448static void 445void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
449ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
450{
451 sc->curmode = mode;
452
453 if (mode == AR5K_MODE_11A) {
454 sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
455 } else {
456 sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
457 }
458}
459
460struct ath_vif_iter_data {
461 const u8 *hw_macaddr;
462 u8 mask[ETH_ALEN];
463 u8 active_mac[ETH_ALEN]; /* first active MAC */
464 bool need_set_hw_addr;
465 bool found_active;
466 bool any_assoc;
467 enum nl80211_iftype opmode;
468};
469
470static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
471{ 446{
472 struct ath_vif_iter_data *iter_data = data; 447 struct ath5k_vif_iter_data *iter_data = data;
473 int i; 448 int i;
474 struct ath5k_vif *avf = (void *)vif->drv_priv; 449 struct ath5k_vif *avf = (void *)vif->drv_priv;
475 450
@@ -499,9 +474,12 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
499 */ 474 */
500 if (avf->opmode == NL80211_IFTYPE_AP) 475 if (avf->opmode == NL80211_IFTYPE_AP)
501 iter_data->opmode = NL80211_IFTYPE_AP; 476 iter_data->opmode = NL80211_IFTYPE_AP;
502 else 477 else {
478 if (avf->opmode == NL80211_IFTYPE_STATION)
479 iter_data->n_stas++;
503 if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED) 480 if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
504 iter_data->opmode = avf->opmode; 481 iter_data->opmode = avf->opmode;
482 }
505} 483}
506 484
507void 485void
@@ -509,7 +487,8 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
509 struct ieee80211_vif *vif) 487 struct ieee80211_vif *vif)
510{ 488{
511 struct ath_common *common = ath5k_hw_common(sc->ah); 489 struct ath_common *common = ath5k_hw_common(sc->ah);
512 struct ath_vif_iter_data iter_data; 490 struct ath5k_vif_iter_data iter_data;
491 u32 rfilt;
513 492
514 /* 493 /*
515 * Use the hardware MAC address as reference, the hardware uses it 494 * Use the hardware MAC address as reference, the hardware uses it
@@ -520,12 +499,13 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
520 iter_data.found_active = false; 499 iter_data.found_active = false;
521 iter_data.need_set_hw_addr = true; 500 iter_data.need_set_hw_addr = true;
522 iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED; 501 iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
502 iter_data.n_stas = 0;
523 503
524 if (vif) 504 if (vif)
525 ath_vif_iter(&iter_data, vif->addr, vif); 505 ath5k_vif_iter(&iter_data, vif->addr, vif);
526 506
527 /* Get list of all active MAC addresses */ 507 /* Get list of all active MAC addresses */
528 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter, 508 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
529 &iter_data); 509 &iter_data);
530 memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN); 510 memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
531 511
@@ -543,20 +523,19 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
543 523
544 if (ath5k_hw_hasbssidmask(sc->ah)) 524 if (ath5k_hw_hasbssidmask(sc->ah))
545 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); 525 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
546}
547 526
548void 527 /* Set up RX Filter */
549ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif) 528 if (iter_data.n_stas > 1) {
550{ 529 /* If you have multiple STA interfaces connected to
551 struct ath5k_hw *ah = sc->ah; 530 * different APs, ARPs are not received (most of the time?)
552 u32 rfilt; 531 * Enabling PROMISC appears to fix that probem.
532 */
533 sc->filter_flags |= AR5K_RX_FILTER_PROM;
534 }
553 535
554 /* configure rx filter */
555 rfilt = sc->filter_flags; 536 rfilt = sc->filter_flags;
556 ath5k_hw_set_rx_filter(ah, rfilt); 537 ath5k_hw_set_rx_filter(sc->ah, rfilt);
557 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 538 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
558
559 ath5k_update_bssid_mask_and_opmode(sc, vif);
560} 539}
561 540
562static inline int 541static inline int
@@ -569,7 +548,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
569 "hw_rix out of bounds: %x\n", hw_rix)) 548 "hw_rix out of bounds: %x\n", hw_rix))
570 return 0; 549 return 0;
571 550
572 rix = sc->rate_idx[sc->curband->band][hw_rix]; 551 rix = sc->rate_idx[sc->curchan->band][hw_rix];
573 if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) 552 if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
574 rix = 0; 553 rix = 0;
575 554
@@ -964,6 +943,7 @@ ath5k_txq_setup(struct ath5k_softc *sc,
964 spin_lock_init(&txq->lock); 943 spin_lock_init(&txq->lock);
965 txq->setup = true; 944 txq->setup = true;
966 txq->txq_len = 0; 945 txq->txq_len = 0;
946 txq->txq_max = ATH5K_TXQ_LEN_MAX;
967 txq->txq_poll_mark = false; 947 txq->txq_poll_mark = false;
968 txq->txq_stuck = 0; 948 txq->txq_stuck = 0;
969 } 949 }
@@ -1132,7 +1112,7 @@ ath5k_rx_start(struct ath5k_softc *sc)
1132 spin_unlock_bh(&sc->rxbuflock); 1112 spin_unlock_bh(&sc->rxbuflock);
1133 1113
1134 ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */ 1114 ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
1135 ath5k_mode_setup(sc, NULL); /* set filters, etc. */ 1115 ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
1136 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 1116 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
1137 1117
1138 return 0; 1118 return 0;
@@ -1376,10 +1356,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1376 * right now, so it's not too bad... 1356 * right now, so it's not too bad...
1377 */ 1357 */
1378 rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp); 1358 rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
1379 rxs->flag |= RX_FLAG_TSFT; 1359 rxs->flag |= RX_FLAG_MACTIME_MPDU;
1380 1360
1381 rxs->freq = sc->curchan->center_freq; 1361 rxs->freq = sc->curchan->center_freq;
1382 rxs->band = sc->curband->band; 1362 rxs->band = sc->curchan->band;
1383 1363
1384 rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi; 1364 rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
1385 1365
@@ -1394,10 +1374,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1394 rxs->flag |= ath5k_rx_decrypted(sc, skb, rs); 1374 rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
1395 1375
1396 if (rxs->rate_idx >= 0 && rs->rs_rate == 1376 if (rxs->rate_idx >= 0 && rs->rs_rate ==
1397 sc->curband->bitrates[rxs->rate_idx].hw_value_short) 1377 sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
1398 rxs->flag |= RX_FLAG_SHORTPRE; 1378 rxs->flag |= RX_FLAG_SHORTPRE;
1399 1379
1400 ath5k_debug_dump_skb(sc, skb, "RX ", 0); 1380 trace_ath5k_rx(sc, skb);
1401 1381
1402 ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi); 1382 ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
1403 1383
@@ -1533,7 +1513,7 @@ unlock:
1533* TX Handling * 1513* TX Handling *
1534\*************/ 1514\*************/
1535 1515
1536int 1516void
1537ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 1517ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1538 struct ath5k_txq *txq) 1518 struct ath5k_txq *txq)
1539{ 1519{
@@ -1542,7 +1522,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1542 unsigned long flags; 1522 unsigned long flags;
1543 int padsize; 1523 int padsize;
1544 1524
1545 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 1525 trace_ath5k_tx(sc, skb, txq);
1546 1526
1547 /* 1527 /*
1548 * The hardware expects the header padded to 4 byte boundaries. 1528 * The hardware expects the header padded to 4 byte boundaries.
@@ -1555,7 +1535,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1555 goto drop_packet; 1535 goto drop_packet;
1556 } 1536 }
1557 1537
1558 if (txq->txq_len >= ATH5K_TXQ_LEN_MAX) 1538 if (txq->txq_len >= txq->txq_max)
1559 ieee80211_stop_queue(hw, txq->qnum); 1539 ieee80211_stop_queue(hw, txq->qnum);
1560 1540
1561 spin_lock_irqsave(&sc->txbuflock, flags); 1541 spin_lock_irqsave(&sc->txbuflock, flags);
@@ -1582,16 +1562,15 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1582 spin_unlock_irqrestore(&sc->txbuflock, flags); 1562 spin_unlock_irqrestore(&sc->txbuflock, flags);
1583 goto drop_packet; 1563 goto drop_packet;
1584 } 1564 }
1585 return NETDEV_TX_OK; 1565 return;
1586 1566
1587drop_packet: 1567drop_packet:
1588 dev_kfree_skb_any(skb); 1568 dev_kfree_skb_any(skb);
1589 return NETDEV_TX_OK;
1590} 1569}
1591 1570
1592static void 1571static void
1593ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb, 1572ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1594 struct ath5k_tx_status *ts) 1573 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
1595{ 1574{
1596 struct ieee80211_tx_info *info; 1575 struct ieee80211_tx_info *info;
1597 int i; 1576 int i;
@@ -1643,6 +1622,7 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1643 else 1622 else
1644 sc->stats.antenna_tx[0]++; /* invalid */ 1623 sc->stats.antenna_tx[0]++; /* invalid */
1645 1624
1625 trace_ath5k_tx_complete(sc, skb, txq, ts);
1646 ieee80211_tx_status(sc->hw, skb); 1626 ieee80211_tx_status(sc->hw, skb);
1647} 1627}
1648 1628
@@ -1679,7 +1659,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1679 1659
1680 dma_unmap_single(sc->dev, bf->skbaddr, skb->len, 1660 dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
1681 DMA_TO_DEVICE); 1661 DMA_TO_DEVICE);
1682 ath5k_tx_frame_completed(sc, skb, &ts); 1662 ath5k_tx_frame_completed(sc, skb, txq, &ts);
1683 } 1663 }
1684 1664
1685 /* 1665 /*
@@ -1821,8 +1801,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1821 goto out; 1801 goto out;
1822 } 1802 }
1823 1803
1824 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
1825
1826 ath5k_txbuf_free_skb(sc, avf->bbuf); 1804 ath5k_txbuf_free_skb(sc, avf->bbuf);
1827 avf->bbuf->skb = skb; 1805 avf->bbuf->skb = skb;
1828 ret = ath5k_beacon_setup(sc, avf->bbuf); 1806 ret = ath5k_beacon_setup(sc, avf->bbuf);
@@ -1917,6 +1895,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
1917 sc->opmode == NL80211_IFTYPE_MESH_POINT) 1895 sc->opmode == NL80211_IFTYPE_MESH_POINT)
1918 ath5k_beacon_update(sc->hw, vif); 1896 ath5k_beacon_update(sc->hw, vif);
1919 1897
1898 trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);
1899
1920 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); 1900 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
1921 ath5k_hw_start_tx_dma(ah, sc->bhalq); 1901 ath5k_hw_start_tx_dma(ah, sc->bhalq);
1922 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", 1902 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
@@ -2417,7 +2397,8 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
2417 /* set up multi-rate retry capabilities */ 2397 /* set up multi-rate retry capabilities */
2418 if (sc->ah->ah_version == AR5K_AR5212) { 2398 if (sc->ah->ah_version == AR5K_AR5212) {
2419 hw->max_rates = 4; 2399 hw->max_rates = 4;
2420 hw->max_rate_tries = 11; 2400 hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
2401 AR5K_INIT_RETRY_LONG);
2421 } 2402 }
2422 2403
2423 hw->vif_data_size = sizeof(struct ath5k_vif); 2404 hw->vif_data_size = sizeof(struct ath5k_vif);
@@ -2554,7 +2535,6 @@ ath5k_init_hw(struct ath5k_softc *sc)
2554 * and then setup of the interrupt mask. 2535 * and then setup of the interrupt mask.
2555 */ 2536 */
2556 sc->curchan = sc->hw->conf.channel; 2537 sc->curchan = sc->hw->conf.channel;
2557 sc->curband = &sc->sbands[sc->curchan->band];
2558 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | 2538 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2559 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2539 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2560 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; 2540 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
@@ -2681,10 +2661,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
2681 * so we should also free any remaining 2661 * so we should also free any remaining
2682 * tx buffers */ 2662 * tx buffers */
2683 ath5k_drain_tx_buffs(sc); 2663 ath5k_drain_tx_buffs(sc);
2684 if (chan) { 2664 if (chan)
2685 sc->curchan = chan; 2665 sc->curchan = chan;
2686 sc->curband = &sc->sbands[chan->band];
2687 }
2688 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL, 2666 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
2689 skip_pcu); 2667 skip_pcu);
2690 if (ret) { 2668 if (ret) {
@@ -2782,12 +2760,6 @@ ath5k_init(struct ieee80211_hw *hw)
2782 goto err; 2760 goto err;
2783 } 2761 }
2784 2762
2785 /* NB: setup here so ath5k_rate_update is happy */
2786 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
2787 ath5k_setcurmode(sc, AR5K_MODE_11A);
2788 else
2789 ath5k_setcurmode(sc, AR5K_MODE_11B);
2790
2791 /* 2763 /*
2792 * Allocate tx+rx descriptors and populate the lists. 2764 * Allocate tx+rx descriptors and populate the lists.
2793 */ 2765 */
@@ -2946,13 +2918,13 @@ ath5k_deinit_softc(struct ath5k_softc *sc)
2946bool 2918bool
2947ath_any_vif_assoc(struct ath5k_softc *sc) 2919ath_any_vif_assoc(struct ath5k_softc *sc)
2948{ 2920{
2949 struct ath_vif_iter_data iter_data; 2921 struct ath5k_vif_iter_data iter_data;
2950 iter_data.hw_macaddr = NULL; 2922 iter_data.hw_macaddr = NULL;
2951 iter_data.any_assoc = false; 2923 iter_data.any_assoc = false;
2952 iter_data.need_set_hw_addr = false; 2924 iter_data.need_set_hw_addr = false;
2953 iter_data.found_active = true; 2925 iter_data.found_active = true;
2954 2926
2955 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter, 2927 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
2956 &iter_data); 2928 &iter_data);
2957 return iter_data.any_assoc; 2929 return iter_data.any_assoc;
2958} 2930}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 6d511476e4d2..978f1f4ac2f3 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -86,6 +86,7 @@ struct ath5k_txq {
86 spinlock_t lock; /* lock on q and link */ 86 spinlock_t lock; /* lock on q and link */
87 bool setup; 87 bool setup;
88 int txq_len; /* number of queued buffers */ 88 int txq_len; /* number of queued buffers */
89 int txq_max; /* max allowed num of queued buffers */
89 bool txq_poll_mark; 90 bool txq_poll_mark;
90 unsigned int txq_stuck; /* informational counter */ 91 unsigned int txq_stuck; /* informational counter */
91}; 92};
@@ -183,8 +184,6 @@ struct ath5k_softc {
183 enum nl80211_iftype opmode; 184 enum nl80211_iftype opmode;
184 struct ath5k_hw *ah; /* Atheros HW */ 185 struct ath5k_hw *ah; /* Atheros HW */
185 186
186 struct ieee80211_supported_band *curband;
187
188#ifdef CONFIG_ATH5K_DEBUG 187#ifdef CONFIG_ATH5K_DEBUG
189 struct ath5k_dbg_info debug; /* debug info */ 188 struct ath5k_dbg_info debug; /* debug info */
190#endif /* CONFIG_ATH5K_DEBUG */ 189#endif /* CONFIG_ATH5K_DEBUG */
@@ -202,7 +201,6 @@ struct ath5k_softc {
202#define ATH_STAT_STARTED 4 /* opened & irqs enabled */ 201#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
203 202
204 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ 203 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
205 unsigned int curmode; /* current phy mode */
206 struct ieee80211_channel *curchan; /* current h/w channel */ 204 struct ieee80211_channel *curchan; /* current h/w channel */
207 205
208 u16 nvifs; 206 u16 nvifs;
@@ -262,6 +260,19 @@ struct ath5k_softc {
262 struct survey_info survey; /* collected survey info */ 260 struct survey_info survey; /* collected survey info */
263}; 261};
264 262
263struct ath5k_vif_iter_data {
264 const u8 *hw_macaddr;
265 u8 mask[ETH_ALEN];
266 u8 active_mac[ETH_ALEN]; /* first active MAC */
267 bool need_set_hw_addr;
268 bool found_active;
269 bool any_assoc;
270 enum nl80211_iftype opmode;
271 int n_stas;
272};
273void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
274
275
265#define ath5k_hw_hasbssidmask(_ah) \ 276#define ath5k_hw_hasbssidmask(_ah) \
266 (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0) 277 (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
267#define ath5k_hw_hasveol(_ah) \ 278#define ath5k_hw_hasveol(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 31cad80e9b01..f77e8a703c5c 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -32,23 +32,24 @@
32 */ 32 */
33int ath5k_hw_set_capabilities(struct ath5k_hw *ah) 33int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
34{ 34{
35 struct ath5k_capabilities *caps = &ah->ah_capabilities;
35 u16 ee_header; 36 u16 ee_header;
36 37
37 /* Capabilities stored in the EEPROM */ 38 /* Capabilities stored in the EEPROM */
38 ee_header = ah->ah_capabilities.cap_eeprom.ee_header; 39 ee_header = caps->cap_eeprom.ee_header;
39 40
40 if (ah->ah_version == AR5K_AR5210) { 41 if (ah->ah_version == AR5K_AR5210) {
41 /* 42 /*
42 * Set radio capabilities 43 * Set radio capabilities
43 * (The AR5110 only supports the middle 5GHz band) 44 * (The AR5110 only supports the middle 5GHz band)
44 */ 45 */
45 ah->ah_capabilities.cap_range.range_5ghz_min = 5120; 46 caps->cap_range.range_5ghz_min = 5120;
46 ah->ah_capabilities.cap_range.range_5ghz_max = 5430; 47 caps->cap_range.range_5ghz_max = 5430;
47 ah->ah_capabilities.cap_range.range_2ghz_min = 0; 48 caps->cap_range.range_2ghz_min = 0;
48 ah->ah_capabilities.cap_range.range_2ghz_max = 0; 49 caps->cap_range.range_2ghz_max = 0;
49 50
50 /* Set supported modes */ 51 /* Set supported modes */
51 __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode); 52 __set_bit(AR5K_MODE_11A, caps->cap_mode);
52 } else { 53 } else {
53 /* 54 /*
54 * XXX The tranceiver supports frequencies from 4920 to 6100GHz 55 * XXX The tranceiver supports frequencies from 4920 to 6100GHz
@@ -56,9 +57,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
56 * XXX current ieee80211 implementation because the IEEE 57 * XXX current ieee80211 implementation because the IEEE
57 * XXX channel mapping does not support negative channel 58 * XXX channel mapping does not support negative channel
58 * XXX numbers (2312MHz is channel -19). Of course, this 59 * XXX numbers (2312MHz is channel -19). Of course, this
59 * XXX doesn't matter because these channels are out of range 60 * XXX doesn't matter because these channels are out of the
60 * XXX but some regulation domains like MKK (Japan) will 61 * XXX legal range.
61 * XXX support frequencies somewhere around 4.8GHz.
62 */ 62 */
63 63
64 /* 64 /*
@@ -66,13 +66,14 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
66 */ 66 */
67 67
68 if (AR5K_EEPROM_HDR_11A(ee_header)) { 68 if (AR5K_EEPROM_HDR_11A(ee_header)) {
69 /* 4920 */ 69 if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
70 ah->ah_capabilities.cap_range.range_5ghz_min = 5005; 70 caps->cap_range.range_5ghz_min = 4920;
71 ah->ah_capabilities.cap_range.range_5ghz_max = 6100; 71 else
72 caps->cap_range.range_5ghz_min = 5005;
73 caps->cap_range.range_5ghz_max = 6100;
72 74
73 /* Set supported modes */ 75 /* Set supported modes */
74 __set_bit(AR5K_MODE_11A, 76 __set_bit(AR5K_MODE_11A, caps->cap_mode);
75 ah->ah_capabilities.cap_mode);
76 } 77 }
77 78
78 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is 79 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is
@@ -81,32 +82,29 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
81 (AR5K_EEPROM_HDR_11G(ee_header) && 82 (AR5K_EEPROM_HDR_11G(ee_header) &&
82 ah->ah_version != AR5K_AR5211)) { 83 ah->ah_version != AR5K_AR5211)) {
83 /* 2312 */ 84 /* 2312 */
84 ah->ah_capabilities.cap_range.range_2ghz_min = 2412; 85 caps->cap_range.range_2ghz_min = 2412;
85 ah->ah_capabilities.cap_range.range_2ghz_max = 2732; 86 caps->cap_range.range_2ghz_max = 2732;
86 87
87 if (AR5K_EEPROM_HDR_11B(ee_header)) 88 if (AR5K_EEPROM_HDR_11B(ee_header))
88 __set_bit(AR5K_MODE_11B, 89 __set_bit(AR5K_MODE_11B, caps->cap_mode);
89 ah->ah_capabilities.cap_mode);
90 90
91 if (AR5K_EEPROM_HDR_11G(ee_header) && 91 if (AR5K_EEPROM_HDR_11G(ee_header) &&
92 ah->ah_version != AR5K_AR5211) 92 ah->ah_version != AR5K_AR5211)
93 __set_bit(AR5K_MODE_11G, 93 __set_bit(AR5K_MODE_11G, caps->cap_mode);
94 ah->ah_capabilities.cap_mode);
95 } 94 }
96 } 95 }
97 96
98 /* Set number of supported TX queues */ 97 /* Set number of supported TX queues */
99 if (ah->ah_version == AR5K_AR5210) 98 if (ah->ah_version == AR5K_AR5210)
100 ah->ah_capabilities.cap_queues.q_tx_num = 99 caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
101 AR5K_NUM_TX_QUEUES_NOQCU;
102 else 100 else
103 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; 101 caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
104 102
105 /* newer hardware has PHY error counters */ 103 /* newer hardware has PHY error counters */
106 if (ah->ah_mac_srev >= AR5K_SREV_AR5213A) 104 if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
107 ah->ah_capabilities.cap_has_phyerr_counters = true; 105 caps->cap_has_phyerr_counters = true;
108 else 106 else
109 ah->ah_capabilities.cap_has_phyerr_counters = false; 107 caps->cap_has_phyerr_counters = false;
110 108
111 return 0; 109 return 0;
112} 110}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index d2f84d76bb07..0230f30e9e9a 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -308,8 +308,6 @@ static const struct {
308 { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" }, 308 { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
309 { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" }, 309 { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
310 { ATH5K_DEBUG_LED, "led", "LED management" }, 310 { ATH5K_DEBUG_LED, "led", "LED management" },
311 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
312 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
313 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 311 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
314 { ATH5K_DEBUG_DMA, "dma", "dma start/stop" }, 312 { ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
315 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, 313 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
@@ -1036,24 +1034,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
1036} 1034}
1037 1035
1038void 1036void
1039ath5k_debug_dump_skb(struct ath5k_softc *sc,
1040 struct sk_buff *skb, const char *prefix, int tx)
1041{
1042 char buf[16];
1043
1044 if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) ||
1045 (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX)))))
1046 return;
1047
1048 snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix);
1049
1050 print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data,
1051 min(200U, skb->len));
1052
1053 printk(KERN_DEBUG "\n");
1054}
1055
1056void
1057ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) 1037ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
1058{ 1038{
1059 struct ath5k_desc *ds = bf->desc; 1039 struct ath5k_desc *ds = bf->desc;
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 3e34428d5126..b0355aef68d3 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -116,8 +116,6 @@ enum ath5k_debug_level {
116 ATH5K_DEBUG_CALIBRATE = 0x00000020, 116 ATH5K_DEBUG_CALIBRATE = 0x00000020,
117 ATH5K_DEBUG_TXPOWER = 0x00000040, 117 ATH5K_DEBUG_TXPOWER = 0x00000040,
118 ATH5K_DEBUG_LED = 0x00000080, 118 ATH5K_DEBUG_LED = 0x00000080,
119 ATH5K_DEBUG_DUMP_RX = 0x00000100,
120 ATH5K_DEBUG_DUMP_TX = 0x00000200,
121 ATH5K_DEBUG_DUMPBANDS = 0x00000400, 119 ATH5K_DEBUG_DUMPBANDS = 0x00000400,
122 ATH5K_DEBUG_DMA = 0x00000800, 120 ATH5K_DEBUG_DMA = 0x00000800,
123 ATH5K_DEBUG_ANI = 0x00002000, 121 ATH5K_DEBUG_ANI = 0x00002000,
@@ -152,10 +150,6 @@ void
152ath5k_debug_dump_bands(struct ath5k_softc *sc); 150ath5k_debug_dump_bands(struct ath5k_softc *sc);
153 151
154void 152void
155ath5k_debug_dump_skb(struct ath5k_softc *sc,
156 struct sk_buff *skb, const char *prefix, int tx);
157
158void
159ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf); 153ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
160 154
161#else /* no debugging */ 155#else /* no debugging */
@@ -182,10 +176,6 @@ static inline void
182ath5k_debug_dump_bands(struct ath5k_softc *sc) {} 176ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
183 177
184static inline void 178static inline void
185ath5k_debug_dump_skb(struct ath5k_softc *sc,
186 struct sk_buff *skb, const char *prefix, int tx) {}
187
188static inline void
189ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {} 179ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
190 180
191#endif /* ifdef CONFIG_ATH5K_DEBUG */ 181#endif /* ifdef CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 80e625608bac..b6561f785c6e 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -72,7 +72,6 @@ static int
72ath5k_eeprom_init_header(struct ath5k_hw *ah) 72ath5k_eeprom_init_header(struct ath5k_hw *ah)
73{ 73{
74 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 74 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
75 int ret;
76 u16 val; 75 u16 val;
77 u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX; 76 u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
78 77
@@ -192,7 +191,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
192 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 191 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
193 u32 o = *offset; 192 u32 o = *offset;
194 u16 val; 193 u16 val;
195 int ret, i = 0; 194 int i = 0;
196 195
197 AR5K_EEPROM_READ(o++, val); 196 AR5K_EEPROM_READ(o++, val);
198 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f; 197 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
@@ -252,7 +251,6 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
252 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 251 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
253 u32 o = *offset; 252 u32 o = *offset;
254 u16 val; 253 u16 val;
255 int ret;
256 254
257 ee->ee_n_piers[mode] = 0; 255 ee->ee_n_piers[mode] = 0;
258 AR5K_EEPROM_READ(o++, val); 256 AR5K_EEPROM_READ(o++, val);
@@ -515,7 +513,6 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
515 int o = *offset; 513 int o = *offset;
516 int i = 0; 514 int i = 0;
517 u8 freq1, freq2; 515 u8 freq1, freq2;
518 int ret;
519 u16 val; 516 u16 val;
520 517
521 ee->ee_n_piers[mode] = 0; 518 ee->ee_n_piers[mode] = 0;
@@ -551,7 +548,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
551{ 548{
552 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 549 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
553 struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a; 550 struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
554 int i, ret; 551 int i;
555 u16 val; 552 u16 val;
556 u8 mask; 553 u8 mask;
557 554
@@ -970,7 +967,6 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
970 u32 offset; 967 u32 offset;
971 u8 i, c; 968 u8 i, c;
972 u16 val; 969 u16 val;
973 int ret;
974 u8 pd_gains = 0; 970 u8 pd_gains = 0;
975 971
976 /* Count how many curves we have and 972 /* Count how many curves we have and
@@ -1228,7 +1224,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
1228 struct ath5k_chan_pcal_info *chinfo; 1224 struct ath5k_chan_pcal_info *chinfo;
1229 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; 1225 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
1230 u32 offset; 1226 u32 offset;
1231 int idx, i, ret; 1227 int idx, i;
1232 u16 val; 1228 u16 val;
1233 u8 pd_gains = 0; 1229 u8 pd_gains = 0;
1234 1230
@@ -1419,7 +1415,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1419 u8 *rate_target_pwr_num; 1415 u8 *rate_target_pwr_num;
1420 u32 offset; 1416 u32 offset;
1421 u16 val; 1417 u16 val;
1422 int ret, i; 1418 int i;
1423 1419
1424 offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1); 1420 offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
1425 rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode]; 1421 rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
@@ -1593,7 +1589,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
1593 struct ath5k_edge_power *rep; 1589 struct ath5k_edge_power *rep;
1594 unsigned int fmask, pmask; 1590 unsigned int fmask, pmask;
1595 unsigned int ctl_mode; 1591 unsigned int ctl_mode;
1596 int ret, i, j; 1592 int i, j;
1597 u32 offset; 1593 u32 offset;
1598 u16 val; 1594 u16 val;
1599 1595
@@ -1733,16 +1729,12 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
1733 u8 mac_d[ETH_ALEN] = {}; 1729 u8 mac_d[ETH_ALEN] = {};
1734 u32 total, offset; 1730 u32 total, offset;
1735 u16 data; 1731 u16 data;
1736 int octet, ret; 1732 int octet;
1737 1733
1738 ret = ath5k_hw_nvram_read(ah, 0x20, &data); 1734 AR5K_EEPROM_READ(0x20, data);
1739 if (ret)
1740 return ret;
1741 1735
1742 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) { 1736 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
1743 ret = ath5k_hw_nvram_read(ah, offset, &data); 1737 AR5K_EEPROM_READ(offset, data);
1744 if (ret)
1745 return ret;
1746 1738
1747 total += data; 1739 total += data;
1748 mac_d[octet + 1] = data & 0xff; 1740 mac_d[octet + 1] = data & 0xff;
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 7c09e150dbdc..6511c27d938e 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -241,9 +241,8 @@ enum ath5k_eeprom_freq_bands{
241#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250 241#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
242 242
243#define AR5K_EEPROM_READ(_o, _v) do { \ 243#define AR5K_EEPROM_READ(_o, _v) do { \
244 ret = ath5k_hw_nvram_read(ah, (_o), &(_v)); \ 244 if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \
245 if (ret) \ 245 return -EIO; \
246 return ret; \
247} while (0) 246} while (0)
248 247
249#define AR5K_EEPROM_READ_HDR(_o, _v) \ 248#define AR5K_EEPROM_READ_HDR(_o, _v) \
@@ -269,29 +268,6 @@ enum ath5k_ctl_mode {
269 AR5K_CTL_MODE_M = 15, 268 AR5K_CTL_MODE_M = 15,
270}; 269};
271 270
272/* Default CTL ids for the 3 main reg domains.
273 * Atheros only uses these by default but vendors
274 * can have up to 32 different CTLs for different
275 * scenarios. Note that theese values are ORed with
276 * the mode id (above) so we can have up to 24 CTL
277 * datasets out of these 3 main regdomains. That leaves
278 * 8 ids that can be used by vendors and since 0x20 is
279 * missing from HAL sources i guess this is the set of
280 * custom CTLs vendors can use. */
281#define AR5K_CTL_FCC 0x10
282#define AR5K_CTL_CUSTOM 0x20
283#define AR5K_CTL_ETSI 0x30
284#define AR5K_CTL_MKK 0x40
285
286/* Indicates a CTL with only mode set and
287 * no reg domain mapping, such CTLs are used
288 * for world roaming domains or simply when
289 * a reg domain is not set */
290#define AR5K_CTL_NO_REGDOMAIN 0xf0
291
292/* Indicates an empty (invalid) CTL */
293#define AR5K_CTL_NO_CTL 0xff
294
295/* Per channel calibration data, used for power table setup */ 271/* Per channel calibration data, used for power table setup */
296struct ath5k_chan_pcal_info_rf5111 { 272struct ath5k_chan_pcal_info_rf5111 {
297 /* Power levels in half dbm units 273 /* Power levels in half dbm units
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index d76d68c99f72..9be29b728b1c 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -48,28 +48,11 @@
48 48
49extern int ath5k_modparam_nohwcrypt; 49extern int ath5k_modparam_nohwcrypt;
50 50
51/* functions used from base.c */
52void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
53bool ath_any_vif_assoc(struct ath5k_softc *sc);
54int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
55 struct ath5k_txq *txq);
56int ath5k_init_hw(struct ath5k_softc *sc);
57int ath5k_stop_hw(struct ath5k_softc *sc);
58void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
59void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
60 struct ieee80211_vif *vif);
61int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
62void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
63int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
64void ath5k_beacon_config(struct ath5k_softc *sc);
65void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
66void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
67
68/********************\ 51/********************\
69* Mac80211 functions * 52* Mac80211 functions *
70\********************/ 53\********************/
71 54
72static int 55static void
73ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 56ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
74{ 57{
75 struct ath5k_softc *sc = hw->priv; 58 struct ath5k_softc *sc = hw->priv;
@@ -77,10 +60,10 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
77 60
78 if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) { 61 if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
79 dev_kfree_skb_any(skb); 62 dev_kfree_skb_any(skb);
80 return 0; 63 return;
81 } 64 }
82 65
83 return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]); 66 ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
84} 67}
85 68
86 69
@@ -175,8 +158,7 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
175 158
176 memcpy(&avf->lladdr, vif->addr, ETH_ALEN); 159 memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
177 160
178 ath5k_mode_setup(sc, vif); 161 ath5k_update_bssid_mask_and_opmode(sc, vif);
179
180 ret = 0; 162 ret = 0;
181end: 163end:
182 mutex_unlock(&sc->lock); 164 mutex_unlock(&sc->lock);
@@ -226,6 +208,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
226 struct ath5k_hw *ah = sc->ah; 208 struct ath5k_hw *ah = sc->ah;
227 struct ieee80211_conf *conf = &hw->conf; 209 struct ieee80211_conf *conf = &hw->conf;
228 int ret = 0; 210 int ret = 0;
211 int i;
229 212
230 mutex_lock(&sc->lock); 213 mutex_lock(&sc->lock);
231 214
@@ -243,6 +226,14 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
243 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); 226 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
244 } 227 }
245 228
229 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
230 ah->ah_retry_long = conf->long_frame_max_tx_count;
231 ah->ah_retry_short = conf->short_frame_max_tx_count;
232
233 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
234 ath5k_hw_set_tx_retry_limits(ah, i);
235 }
236
246 /* TODO: 237 /* TODO:
247 * 1) Move this on config_interface and handle each case 238 * 1) Move this on config_interface and handle each case
248 * separately eg. when we have only one STA vif, use 239 * separately eg. when we have only one STA vif, use
@@ -389,6 +380,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
389 struct ath5k_softc *sc = hw->priv; 380 struct ath5k_softc *sc = hw->priv;
390 struct ath5k_hw *ah = sc->ah; 381 struct ath5k_hw *ah = sc->ah;
391 u32 mfilt[2], rfilt; 382 u32 mfilt[2], rfilt;
383 struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
392 384
393 mutex_lock(&sc->lock); 385 mutex_lock(&sc->lock);
394 386
@@ -462,6 +454,21 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
462 break; 454 break;
463 } 455 }
464 456
457 iter_data.hw_macaddr = NULL;
458 iter_data.n_stas = 0;
459 iter_data.need_set_hw_addr = false;
460 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
461 &iter_data);
462
463 /* Set up RX Filter */
464 if (iter_data.n_stas > 1) {
465 /* If you have multiple STA interfaces connected to
466 * different APs, ARPs are not received (most of the time?)
467 * Enabling PROMISC appears to fix that probem.
468 */
469 rfilt |= AR5K_RX_FILTER_PROM;
470 }
471
465 /* Set filters */ 472 /* Set filters */
466 ath5k_hw_set_rx_filter(ah, rfilt); 473 ath5k_hw_set_rx_filter(ah, rfilt);
467 474
@@ -733,6 +740,47 @@ ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
733} 740}
734 741
735 742
743static void ath5k_get_ringparam(struct ieee80211_hw *hw,
744 u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
745{
746 struct ath5k_softc *sc = hw->priv;
747
748 *tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
749
750 *tx_max = ATH5K_TXQ_LEN_MAX;
751 *rx = *rx_max = ATH_RXBUF;
752}
753
754
755static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
756{
757 struct ath5k_softc *sc = hw->priv;
758 u16 qnum;
759
760 /* only support setting tx ring size for now */
761 if (rx != ATH_RXBUF)
762 return -EINVAL;
763
764 /* restrict tx ring size min/max */
765 if (!tx || tx > ATH5K_TXQ_LEN_MAX)
766 return -EINVAL;
767
768 for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) {
769 if (!sc->txqs[qnum].setup)
770 continue;
771 if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
772 sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
773 continue;
774
775 sc->txqs[qnum].txq_max = tx;
776 if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max)
777 ieee80211_stop_queue(hw, sc->txqs[qnum].qnum);
778 }
779
780 return 0;
781}
782
783
736const struct ieee80211_ops ath5k_hw_ops = { 784const struct ieee80211_ops ath5k_hw_ops = {
737 .tx = ath5k_tx, 785 .tx = ath5k_tx,
738 .start = ath5k_start, 786 .start = ath5k_start,
@@ -771,4 +819,6 @@ const struct ieee80211_ops ath5k_hw_ops = {
771 /* .napi_poll = not implemented */ 819 /* .napi_poll = not implemented */
772 .set_antenna = ath5k_set_antenna, 820 .set_antenna = ath5k_set_antenna,
773 .get_antenna = ath5k_get_antenna, 821 .get_antenna = ath5k_get_antenna,
822 .set_ringparam = ath5k_set_ringparam,
823 .get_ringparam = ath5k_get_ringparam,
774}; 824};
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 7f8c5b0e9d2a..66598a0d1df0 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -69,7 +69,8 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
69/* 69/*
70 * Read from eeprom 70 * Read from eeprom
71 */ 71 */
72bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) 72static bool
73ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
73{ 74{
74 struct ath5k_hw *ah = (struct ath5k_hw *) common->ah; 75 struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
75 u32 status, timeout; 76 u32 status, timeout;
@@ -90,15 +91,15 @@ bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
90 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS); 91 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
91 if (status & AR5K_EEPROM_STAT_RDDONE) { 92 if (status & AR5K_EEPROM_STAT_RDDONE) {
92 if (status & AR5K_EEPROM_STAT_RDERR) 93 if (status & AR5K_EEPROM_STAT_RDERR)
93 return -EIO; 94 return false;
94 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) & 95 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
95 0xffff); 96 0xffff);
96 return 0; 97 return true;
97 } 98 }
98 udelay(15); 99 udelay(15);
99 } 100 }
100 101
101 return -ETIMEDOUT; 102 return false;
102} 103}
103 104
104int ath5k_hw_read_srev(struct ath5k_hw *ah) 105int ath5k_hw_read_srev(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 2c9c9e793d4e..3343fb9e4940 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -228,24 +228,9 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
228/* 228/*
229 * Set tx retry limits on DCU 229 * Set tx retry limits on DCU
230 */ 230 */
231static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, 231void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
232 unsigned int queue) 232 unsigned int queue)
233{ 233{
234 u32 retry_lg, retry_sh;
235
236 /*
237 * Calculate and set retry limits
238 */
239 if (ah->ah_software_retry) {
240 /* XXX Need to test this */
241 retry_lg = ah->ah_limit_tx_retries;
242 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
243 AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
244 } else {
245 retry_lg = AR5K_INIT_LG_RETRY;
246 retry_sh = AR5K_INIT_SH_RETRY;
247 }
248
249 /* Single data queue on AR5210 */ 234 /* Single data queue on AR5210 */
250 if (ah->ah_version == AR5K_AR5210) { 235 if (ah->ah_version == AR5K_AR5210) {
251 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; 236 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -255,25 +240,26 @@ static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
255 240
256 ath5k_hw_reg_write(ah, 241 ath5k_hw_reg_write(ah,
257 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S) 242 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
258 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY, 243 | AR5K_REG_SM(ah->ah_retry_long,
259 AR5K_NODCU_RETRY_LMT_SLG_RETRY) 244 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
260 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY, 245 | AR5K_REG_SM(ah->ah_retry_short,
261 AR5K_NODCU_RETRY_LMT_SSH_RETRY) 246 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
262 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY) 247 | AR5K_REG_SM(ah->ah_retry_long,
263 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY), 248 AR5K_NODCU_RETRY_LMT_LG_RETRY)
249 | AR5K_REG_SM(ah->ah_retry_short,
250 AR5K_NODCU_RETRY_LMT_SH_RETRY),
264 AR5K_NODCU_RETRY_LMT); 251 AR5K_NODCU_RETRY_LMT);
265 /* DCU on AR5211+ */ 252 /* DCU on AR5211+ */
266 } else { 253 } else {
267 ath5k_hw_reg_write(ah, 254 ath5k_hw_reg_write(ah,
268 AR5K_REG_SM(AR5K_INIT_SLG_RETRY, 255 AR5K_REG_SM(ah->ah_retry_long,
269 AR5K_DCU_RETRY_LMT_SLG_RETRY) | 256 AR5K_DCU_RETRY_LMT_RTS)
270 AR5K_REG_SM(AR5K_INIT_SSH_RETRY, 257 | AR5K_REG_SM(ah->ah_retry_long,
271 AR5K_DCU_RETRY_LMT_SSH_RETRY) | 258 AR5K_DCU_RETRY_LMT_STA_RTS)
272 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) | 259 | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
273 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY), 260 AR5K_DCU_RETRY_LMT_STA_DATA),
274 AR5K_QUEUE_DFS_RETRY_LIMIT(queue)); 261 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
275 } 262 }
276 return;
277} 263}
278 264
279/** 265/**
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index fd14b9103951..e1c9abd8c879 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -686,16 +686,15 @@
686 686
687/* 687/*
688 * DCU retry limit registers 688 * DCU retry limit registers
689 * all these fields don't allow zero values
689 */ 690 */
690#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */ 691#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */
691#define AR5K_DCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */ 692#define AR5K_DCU_RETRY_LMT_RTS 0x0000000f /* RTS failure limit. Transmission fails if no CTS is received for this number of times */
692#define AR5K_DCU_RETRY_LMT_SH_RETRY_S 0 693#define AR5K_DCU_RETRY_LMT_RTS_S 0
693#define AR5K_DCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry limit mask */ 694#define AR5K_DCU_RETRY_LMT_STA_RTS 0x00003f00 /* STA RTS failure limit. If exceeded CW reset */
694#define AR5K_DCU_RETRY_LMT_LG_RETRY_S 4 695#define AR5K_DCU_RETRY_LMT_STA_RTS_S 8
695#define AR5K_DCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask (?) */ 696#define AR5K_DCU_RETRY_LMT_STA_DATA 0x000fc000 /* STA data failure limit. If exceeded CW reset. */
696#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8 697#define AR5K_DCU_RETRY_LMT_STA_DATA_S 14
697#define AR5K_DCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask (?) */
698#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14
699#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q) 698#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q)
700 699
701/* 700/*
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
new file mode 100644
index 000000000000..2de68adb6240
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -0,0 +1,107 @@
1#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
2#define __TRACE_ATH5K_H
3
4#include <linux/tracepoint.h>
5#include "base.h"
6
7#ifndef CONFIG_ATH5K_TRACER
8#undef TRACE_EVENT
9#define TRACE_EVENT(name, proto, ...) \
10static inline void trace_ ## name(proto) {}
11#endif
12
13struct sk_buff;
14
15#define PRIV_ENTRY __field(struct ath5k_softc *, priv)
16#define PRIV_ASSIGN __entry->priv = priv
17
18#undef TRACE_SYSTEM
19#define TRACE_SYSTEM ath5k
20
21TRACE_EVENT(ath5k_rx,
22 TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
23 TP_ARGS(priv, skb),
24 TP_STRUCT__entry(
25 PRIV_ENTRY
26 __field(unsigned long, skbaddr)
27 __dynamic_array(u8, frame, skb->len)
28 ),
29 TP_fast_assign(
30 PRIV_ASSIGN;
31 __entry->skbaddr = (unsigned long) skb;
32 memcpy(__get_dynamic_array(frame), skb->data, skb->len);
33 ),
34 TP_printk(
35 "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
36 )
37);
38
39TRACE_EVENT(ath5k_tx,
40 TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
41 struct ath5k_txq *q),
42
43 TP_ARGS(priv, skb, q),
44
45 TP_STRUCT__entry(
46 PRIV_ENTRY
47 __field(unsigned long, skbaddr)
48 __field(u8, qnum)
49 __dynamic_array(u8, frame, skb->len)
50 ),
51
52 TP_fast_assign(
53 PRIV_ASSIGN;
54 __entry->skbaddr = (unsigned long) skb;
55 __entry->qnum = (u8) q->qnum;
56 memcpy(__get_dynamic_array(frame), skb->data, skb->len);
57 ),
58
59 TP_printk(
60 "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
61 __entry->qnum
62 )
63);
64
65TRACE_EVENT(ath5k_tx_complete,
66 TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
67 struct ath5k_txq *q, struct ath5k_tx_status *ts),
68
69 TP_ARGS(priv, skb, q, ts),
70
71 TP_STRUCT__entry(
72 PRIV_ENTRY
73 __field(unsigned long, skbaddr)
74 __field(u8, qnum)
75 __field(u8, ts_status)
76 __field(s8, ts_rssi)
77 __field(u8, ts_antenna)
78 ),
79
80 TP_fast_assign(
81 PRIV_ASSIGN;
82 __entry->skbaddr = (unsigned long) skb;
83 __entry->qnum = (u8) q->qnum;
84 __entry->ts_status = ts->ts_status;
85 __entry->ts_rssi = ts->ts_rssi;
86 __entry->ts_antenna = ts->ts_antenna;
87 ),
88
89 TP_printk(
90 "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
91 __entry->priv, __entry->skbaddr, __entry->qnum,
92 __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
93 )
94);
95
96#endif /* __TRACE_ATH5K_H */
97
98#ifdef CONFIG_ATH5K_TRACER
99
100#undef TRACE_INCLUDE_PATH
101#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
102#undef TRACE_INCLUDE_FILE
103#define TRACE_INCLUDE_FILE trace
104
105#include <trace/define_trace.h>
106
107#endif
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index aca01621c205..4d66ca8042eb 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,7 +4,6 @@ ath9k-y += beacon.o \
4 main.o \ 4 main.o \
5 recv.o \ 5 recv.o \
6 xmit.o \ 6 xmit.o \
7 virtual.o \
8 7
9ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o 8ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
10ath9k-$(CONFIG_PCI) += pci.o 9ath9k-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 25a6e4417cdb..9cb0efa9b4c0 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,6 @@ static struct ath_bus_ops ath_ahb_bus_ops = {
54static int ath_ahb_probe(struct platform_device *pdev) 54static int ath_ahb_probe(struct platform_device *pdev)
55{ 55{
56 void __iomem *mem; 56 void __iomem *mem;
57 struct ath_wiphy *aphy;
58 struct ath_softc *sc; 57 struct ath_softc *sc;
59 struct ieee80211_hw *hw; 58 struct ieee80211_hw *hw;
60 struct resource *res; 59 struct resource *res;
@@ -76,7 +75,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
76 goto err_out; 75 goto err_out;
77 } 76 }
78 77
79 mem = ioremap_nocache(res->start, res->end - res->start + 1); 78 mem = ioremap_nocache(res->start, resource_size(res));
80 if (mem == NULL) { 79 if (mem == NULL) {
81 dev_err(&pdev->dev, "ioremap failed\n"); 80 dev_err(&pdev->dev, "ioremap failed\n");
82 ret = -ENOMEM; 81 ret = -ENOMEM;
@@ -92,8 +91,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
92 91
93 irq = res->start; 92 irq = res->start;
94 93
95 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + 94 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
96 sizeof(struct ath_softc), &ath9k_ops);
97 if (hw == NULL) { 95 if (hw == NULL) {
98 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 96 dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
99 ret = -ENOMEM; 97 ret = -ENOMEM;
@@ -103,11 +101,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
103 SET_IEEE80211_DEV(hw, &pdev->dev); 101 SET_IEEE80211_DEV(hw, &pdev->dev);
104 platform_set_drvdata(pdev, hw); 102 platform_set_drvdata(pdev, hw);
105 103
106 aphy = hw->priv; 104 sc = hw->priv;
107 sc = (struct ath_softc *) (aphy + 1);
108 aphy->sc = sc;
109 aphy->hw = hw;
110 sc->pri_wiphy = aphy;
111 sc->hw = hw; 105 sc->hw = hw;
112 sc->dev = &pdev->dev; 106 sc->dev = &pdev->dev;
113 sc->mem = mem; 107 sc->mem = mem;
@@ -151,8 +145,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
151 struct ieee80211_hw *hw = platform_get_drvdata(pdev); 145 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
152 146
153 if (hw) { 147 if (hw) {
154 struct ath_wiphy *aphy = hw->priv; 148 struct ath_softc *sc = hw->priv;
155 struct ath_softc *sc = aphy->sc;
156 void __iomem *mem = sc->mem; 149 void __iomem *mem = sc->mem;
157 150
158 ath9k_deinit_device(sc); 151 ath9k_deinit_device(sc);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 5e300bd3d264..76388c6d6692 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -805,7 +805,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
805{ 805{
806 struct ath_common *common = ath9k_hw_common(ah); 806 struct ath_common *common = ath9k_hw_common(ah);
807 807
808 if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) { 808 if (AR_SREV_9271(ah)) {
809 if (!ar9285_hw_cl_cal(ah, chan))
810 return false;
811 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
809 if (!ar9285_hw_clc(ah, chan)) 812 if (!ar9285_hw_clc(ah, chan))
810 return false; 813 return false;
811 } else { 814 } else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 4819747fa4c3..4a9271802991 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3673,7 +3673,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3673 return; 3673 return;
3674 3674
3675 reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) | 3675 reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
3676 (7 << 14) | (6 << 17) | (1 << 20) | 3676 (2 << 14) | (6 << 17) | (1 << 20) |
3677 (3 << 24) | (1 << 28); 3677 (3 << 24) | (1 << 28);
3678 3678
3679 REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set); 3679 REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
@@ -3959,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
3959{ 3959{
3960#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) 3960#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
3961 /* make sure forced gain is not set */ 3961 /* make sure forced gain is not set */
3962 REG_WRITE(ah, 0xa458, 0); 3962 REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);
3963 3963
3964 /* Write the OFDM power per rate set */ 3964 /* Write the OFDM power per rate set */
3965 3965
3966 /* 6 (LSB), 9, 12, 18 (MSB) */ 3966 /* 6 (LSB), 9, 12, 18 (MSB) */
3967 REG_WRITE(ah, 0xa3c0, 3967 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
3968 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) | 3968 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
3969 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) | 3969 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
3970 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) | 3970 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
3971 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0)); 3971 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
3972 3972
3973 /* 24 (LSB), 36, 48, 54 (MSB) */ 3973 /* 24 (LSB), 36, 48, 54 (MSB) */
3974 REG_WRITE(ah, 0xa3c4, 3974 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
3975 POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) | 3975 POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
3976 POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) | 3976 POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
3977 POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) | 3977 POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
@@ -3980,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
3980 /* Write the CCK power per rate set */ 3980 /* Write the CCK power per rate set */
3981 3981
3982 /* 1L (LSB), reserved, 2L, 2S (MSB) */ 3982 /* 1L (LSB), reserved, 2L, 2S (MSB) */
3983 REG_WRITE(ah, 0xa3c8, 3983 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
3984 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) | 3984 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
3985 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) | 3985 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
3986 /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */ 3986 /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
3987 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)); 3987 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
3988 3988
3989 /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */ 3989 /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
3990 REG_WRITE(ah, 0xa3cc, 3990 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
3991 POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) | 3991 POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
3992 POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) | 3992 POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
3993 POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) | 3993 POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
@@ -3997,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
3997 /* Write the HT20 power per rate set */ 3997 /* Write the HT20 power per rate set */
3998 3998
3999 /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */ 3999 /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
4000 REG_WRITE(ah, 0xa3d0, 4000 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
4001 POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) | 4001 POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
4002 POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) | 4002 POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
4003 POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) | 4003 POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
@@ -4005,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4005 ); 4005 );
4006 4006
4007 /* 6 (LSB), 7, 12, 13 (MSB) */ 4007 /* 6 (LSB), 7, 12, 13 (MSB) */
4008 REG_WRITE(ah, 0xa3d4, 4008 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
4009 POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) | 4009 POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
4010 POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) | 4010 POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
4011 POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) | 4011 POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
@@ -4013,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4013 ); 4013 );
4014 4014
4015 /* 14 (LSB), 15, 20, 21 */ 4015 /* 14 (LSB), 15, 20, 21 */
4016 REG_WRITE(ah, 0xa3e4, 4016 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
4017 POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) | 4017 POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
4018 POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) | 4018 POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
4019 POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) | 4019 POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
@@ -4023,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4023 /* Mixed HT20 and HT40 rates */ 4023 /* Mixed HT20 and HT40 rates */
4024 4024
4025 /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */ 4025 /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
4026 REG_WRITE(ah, 0xa3e8, 4026 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
4027 POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) | 4027 POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
4028 POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) | 4028 POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
4029 POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) | 4029 POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
@@ -4035,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4035 * correct PAR difference between HT40 and HT20/LEGACY 4035 * correct PAR difference between HT40 and HT20/LEGACY
4036 * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) 4036 * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
4037 */ 4037 */
4038 REG_WRITE(ah, 0xa3d8, 4038 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
4039 POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) | 4039 POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
4040 POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) | 4040 POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
4041 POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) | 4041 POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
@@ -4043,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4043 ); 4043 );
4044 4044
4045 /* 6 (LSB), 7, 12, 13 (MSB) */ 4045 /* 6 (LSB), 7, 12, 13 (MSB) */
4046 REG_WRITE(ah, 0xa3dc, 4046 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
4047 POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) | 4047 POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
4048 POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) | 4048 POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
4049 POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) | 4049 POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
@@ -4051,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4051 ); 4051 );
4052 4052
4053 /* 14 (LSB), 15, 20, 21 */ 4053 /* 14 (LSB), 15, 20, 21 */
4054 REG_WRITE(ah, 0xa3ec, 4054 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
4055 POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) | 4055 POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
4056 POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) | 4056 POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
4057 POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) | 4057 POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 06fb2c850535..7f5de6e4448b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -28,7 +28,67 @@
28 */ 28 */
29static void ar9003_hw_init_mode_regs(struct ath_hw *ah) 29static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
30{ 30{
31 if (AR_SREV_9485(ah)) { 31 if (AR_SREV_9485_11(ah)) {
32 /* mac */
33 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
34 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
35 ar9485_1_1_mac_core,
36 ARRAY_SIZE(ar9485_1_1_mac_core), 2);
37 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
38 ar9485_1_1_mac_postamble,
39 ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
40
41 /* bb */
42 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
43 ARRAY_SIZE(ar9485_1_1), 2);
44 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
45 ar9485_1_1_baseband_core,
46 ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
47 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
48 ar9485_1_1_baseband_postamble,
49 ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
50
51 /* radio */
52 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
53 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
54 ar9485_1_1_radio_core,
55 ARRAY_SIZE(ar9485_1_1_radio_core), 2);
56 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
57 ar9485_1_1_radio_postamble,
58 ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
59
60 /* soc */
61 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
62 ar9485_1_1_soc_preamble,
63 ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
64 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
65 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
66
67 /* rx/tx gain */
68 INIT_INI_ARRAY(&ah->iniModesRxGain,
69 ar9485_common_rx_gain_1_1,
70 ARRAY_SIZE(ar9485_common_rx_gain_1_1), 2);
71 INIT_INI_ARRAY(&ah->iniModesTxGain,
72 ar9485_modes_lowest_ob_db_tx_gain_1_1,
73 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
74 5);
75
76 /* Load PCIE SERDES settings from INI */
77
78 /* Awake Setting */
79
80 INIT_INI_ARRAY(&ah->iniPcieSerdes,
81 ar9485_1_1_pcie_phy_clkreq_disable_L1,
82 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
83 2);
84
85 /* Sleep Setting */
86
87 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
88 ar9485_1_1_pcie_phy_clkreq_disable_L1,
89 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
90 2);
91 } else if (AR_SREV_9485(ah)) {
32 /* mac */ 92 /* mac */
33 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 93 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
34 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 94 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -85,8 +145,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
85 /* Sleep Setting */ 145 /* Sleep Setting */
86 146
87 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 147 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
88 ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1, 148 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
89 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1), 149 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
90 2); 150 2);
91 } else { 151 } else {
92 /* mac */ 152 /* mac */
@@ -163,7 +223,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
163 switch (ar9003_hw_get_tx_gain_idx(ah)) { 223 switch (ar9003_hw_get_tx_gain_idx(ah)) {
164 case 0: 224 case 0:
165 default: 225 default:
166 if (AR_SREV_9485(ah)) 226 if (AR_SREV_9485_11(ah))
227 INIT_INI_ARRAY(&ah->iniModesTxGain,
228 ar9485_modes_lowest_ob_db_tx_gain_1_1,
229 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
230 5);
231 else if (AR_SREV_9485(ah))
167 INIT_INI_ARRAY(&ah->iniModesTxGain, 232 INIT_INI_ARRAY(&ah->iniModesTxGain,
168 ar9485Modes_lowest_ob_db_tx_gain_1_0, 233 ar9485Modes_lowest_ob_db_tx_gain_1_0,
169 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), 234 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
@@ -175,10 +240,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
175 5); 240 5);
176 break; 241 break;
177 case 1: 242 case 1:
178 if (AR_SREV_9485(ah)) 243 if (AR_SREV_9485_11(ah))
244 INIT_INI_ARRAY(&ah->iniModesTxGain,
245 ar9485Modes_high_ob_db_tx_gain_1_1,
246 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
247 5);
248 else if (AR_SREV_9485(ah))
179 INIT_INI_ARRAY(&ah->iniModesTxGain, 249 INIT_INI_ARRAY(&ah->iniModesTxGain,
180 ar9485Modes_high_ob_db_tx_gain_1_0, 250 ar9485Modes_high_ob_db_tx_gain_1_0,
181 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), 251 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0),
182 5); 252 5);
183 else 253 else
184 INIT_INI_ARRAY(&ah->iniModesTxGain, 254 INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -187,10 +257,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
187 5); 257 5);
188 break; 258 break;
189 case 2: 259 case 2:
190 if (AR_SREV_9485(ah)) 260 if (AR_SREV_9485_11(ah))
261 INIT_INI_ARRAY(&ah->iniModesTxGain,
262 ar9485Modes_low_ob_db_tx_gain_1_1,
263 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
264 5);
265 else if (AR_SREV_9485(ah))
191 INIT_INI_ARRAY(&ah->iniModesTxGain, 266 INIT_INI_ARRAY(&ah->iniModesTxGain,
192 ar9485Modes_low_ob_db_tx_gain_1_0, 267 ar9485Modes_low_ob_db_tx_gain_1_0,
193 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), 268 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0),
194 5); 269 5);
195 else 270 else
196 INIT_INI_ARRAY(&ah->iniModesTxGain, 271 INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -199,7 +274,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
199 5); 274 5);
200 break; 275 break;
201 case 3: 276 case 3:
202 if (AR_SREV_9485(ah)) 277 if (AR_SREV_9485_11(ah))
278 INIT_INI_ARRAY(&ah->iniModesTxGain,
279 ar9485Modes_high_power_tx_gain_1_1,
280 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
281 5);
282 else if (AR_SREV_9485(ah))
203 INIT_INI_ARRAY(&ah->iniModesTxGain, 283 INIT_INI_ARRAY(&ah->iniModesTxGain,
204 ar9485Modes_high_power_tx_gain_1_0, 284 ar9485Modes_high_power_tx_gain_1_0,
205 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0), 285 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0),
@@ -218,7 +298,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
218 switch (ar9003_hw_get_rx_gain_idx(ah)) { 298 switch (ar9003_hw_get_rx_gain_idx(ah)) {
219 case 0: 299 case 0:
220 default: 300 default:
221 if (AR_SREV_9485(ah)) 301 if (AR_SREV_9485_11(ah))
302 INIT_INI_ARRAY(&ah->iniModesRxGain,
303 ar9485_common_rx_gain_1_1,
304 ARRAY_SIZE(ar9485_common_rx_gain_1_1),
305 2);
306 else if (AR_SREV_9485(ah))
222 INIT_INI_ARRAY(&ah->iniModesRxGain, 307 INIT_INI_ARRAY(&ah->iniModesRxGain,
223 ar9485Common_rx_gain_1_0, 308 ar9485Common_rx_gain_1_0,
224 ARRAY_SIZE(ar9485Common_rx_gain_1_0), 309 ARRAY_SIZE(ar9485Common_rx_gain_1_0),
@@ -230,7 +315,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
230 2); 315 2);
231 break; 316 break;
232 case 1: 317 case 1:
233 if (AR_SREV_9485(ah)) 318 if (AR_SREV_9485_11(ah))
319 INIT_INI_ARRAY(&ah->iniModesRxGain,
320 ar9485Common_wo_xlna_rx_gain_1_1,
321 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
322 2);
323 else if (AR_SREV_9485(ah))
234 INIT_INI_ARRAY(&ah->iniModesRxGain, 324 INIT_INI_ARRAY(&ah->iniModesRxGain,
235 ar9485Common_wo_xlna_rx_gain_1_0, 325 ar9485Common_wo_xlna_rx_gain_1_0,
236 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0), 326 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 4ceddbbdfcee..038a0cbfc6e7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
615 */ 615 */
616 if (rxsp->status11 & AR_CRCErr) 616 if (rxsp->status11 & AR_CRCErr)
617 rxs->rs_status |= ATH9K_RXERR_CRC; 617 rxs->rs_status |= ATH9K_RXERR_CRC;
618 if (rxsp->status11 & AR_PHYErr) { 618 else if (rxsp->status11 & AR_PHYErr) {
619 phyerr = MS(rxsp->status11, AR_PHYErrCode); 619 phyerr = MS(rxsp->status11, AR_PHYErrCode);
620 /* 620 /*
621 * If we reach a point here where AR_PostDelimCRCErr is 621 * If we reach a point here where AR_PostDelimCRCErr is
@@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
638 rxs->rs_phyerr = phyerr; 638 rxs->rs_phyerr = phyerr;
639 } 639 }
640 640
641 } 641 } else if (rxsp->status11 & AR_DecryptCRCErr)
642 if (rxsp->status11 & AR_DecryptCRCErr)
643 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 642 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
644 if (rxsp->status11 & AR_MichaelErr) 643 else if (rxsp->status11 & AR_MichaelErr)
645 rxs->rs_status |= ATH9K_RXERR_MIC; 644 rxs->rs_status |= ATH9K_RXERR_MIC;
645
646 if (rxsp->status11 & AR_KeyMiss) 646 if (rxsp->status11 & AR_KeyMiss)
647 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 647 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
648 } 648 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 8d60f4f09acc..eb250d6b8038 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1020,28 +1020,29 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
1020static void ar9003_hw_do_getnf(struct ath_hw *ah, 1020static void ar9003_hw_do_getnf(struct ath_hw *ah,
1021 int16_t nfarray[NUM_NF_READINGS]) 1021 int16_t nfarray[NUM_NF_READINGS])
1022{ 1022{
1023 int16_t nf; 1023#define AR_PHY_CH_MINCCA_PWR 0x1FF00000
1024 1024#define AR_PHY_CH_MINCCA_PWR_S 20
1025 nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR); 1025#define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000
1026 nfarray[0] = sign_extend32(nf, 8); 1026#define AR_PHY_CH_EXT_MINCCA_PWR_S 16
1027
1028 nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
1029 nfarray[1] = sign_extend32(nf, 8);
1030 1027
1031 nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR); 1028 int16_t nf;
1032 nfarray[2] = sign_extend32(nf, 8); 1029 int i;
1033
1034 if (!IS_CHAN_HT40(ah->curchan))
1035 return;
1036 1030
1037 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); 1031 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
1038 nfarray[3] = sign_extend32(nf, 8); 1032 if (ah->rxchainmask & BIT(i)) {
1033 nf = MS(REG_READ(ah, ah->nf_regs[i]),
1034 AR_PHY_CH_MINCCA_PWR);
1035 nfarray[i] = sign_extend32(nf, 8);
1039 1036
1040 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR); 1037 if (IS_CHAN_HT40(ah->curchan)) {
1041 nfarray[4] = sign_extend32(nf, 8); 1038 u8 ext_idx = AR9300_MAX_CHAINS + i;
1042 1039
1043 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR); 1040 nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
1044 nfarray[5] = sign_extend32(nf, 8); 1041 AR_PHY_CH_EXT_MINCCA_PWR);
1042 nfarray[ext_idx] = sign_extend32(nf, 8);
1043 }
1044 }
1045 }
1045} 1046}
1046 1047
1047static void ar9003_hw_set_nf_limits(struct ath_hw *ah) 1048static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 59bab6bd8a74..8bdda2cf9dd7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -486,6 +486,8 @@
486#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac) 486#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
487#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0) 487#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
488 488
489#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
490
489#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0) 491#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
490#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4) 492#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
491 493
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 70de3d89a7b5..71cc0a3a29fb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -667,6 +667,7 @@ static const u32 ar9485_1_0_pcie_phy_clkreq_enable_L1[][2] = {
667 667
668static const u32 ar9485_1_0_soc_preamble[][2] = { 668static const u32 ar9485_1_0_soc_preamble[][2] = {
669 /* Addr allmodes */ 669 /* Addr allmodes */
670 {0x00004090, 0x00aa10aa},
670 {0x000040a4, 0x00a0c9c9}, 671 {0x000040a4, 0x00a0c9c9},
671 {0x00007048, 0x00000004}, 672 {0x00007048, 0x00000004},
672}; 673};
@@ -940,4 +941,1146 @@ static const u32 ar9485_1_0_mac_core[][2] = {
940 {0x000083cc, 0x00000200}, 941 {0x000083cc, 0x00000200},
941 {0x000083d0, 0x000301ff}, 942 {0x000083d0, 0x000301ff},
942}; 943};
944
945static const u32 ar9485_1_1_mac_core[][2] = {
946 /* Addr allmodes */
947 {0x00000008, 0x00000000},
948 {0x00000030, 0x00020085},
949 {0x00000034, 0x00000005},
950 {0x00000040, 0x00000000},
951 {0x00000044, 0x00000000},
952 {0x00000048, 0x00000008},
953 {0x0000004c, 0x00000010},
954 {0x00000050, 0x00000000},
955 {0x00001040, 0x002ffc0f},
956 {0x00001044, 0x002ffc0f},
957 {0x00001048, 0x002ffc0f},
958 {0x0000104c, 0x002ffc0f},
959 {0x00001050, 0x002ffc0f},
960 {0x00001054, 0x002ffc0f},
961 {0x00001058, 0x002ffc0f},
962 {0x0000105c, 0x002ffc0f},
963 {0x00001060, 0x002ffc0f},
964 {0x00001064, 0x002ffc0f},
965 {0x000010f0, 0x00000100},
966 {0x00001270, 0x00000000},
967 {0x000012b0, 0x00000000},
968 {0x000012f0, 0x00000000},
969 {0x0000143c, 0x00000000},
970 {0x0000147c, 0x00000000},
971 {0x00008000, 0x00000000},
972 {0x00008004, 0x00000000},
973 {0x00008008, 0x00000000},
974 {0x0000800c, 0x00000000},
975 {0x00008018, 0x00000000},
976 {0x00008020, 0x00000000},
977 {0x00008038, 0x00000000},
978 {0x0000803c, 0x00000000},
979 {0x00008040, 0x00000000},
980 {0x00008044, 0x00000000},
981 {0x00008048, 0x00000000},
982 {0x0000804c, 0xffffffff},
983 {0x00008054, 0x00000000},
984 {0x00008058, 0x00000000},
985 {0x0000805c, 0x000fc78f},
986 {0x00008060, 0x0000000f},
987 {0x00008064, 0x00000000},
988 {0x00008070, 0x00000310},
989 {0x00008074, 0x00000020},
990 {0x00008078, 0x00000000},
991 {0x0000809c, 0x0000000f},
992 {0x000080a0, 0x00000000},
993 {0x000080a4, 0x02ff0000},
994 {0x000080a8, 0x0e070605},
995 {0x000080ac, 0x0000000d},
996 {0x000080b0, 0x00000000},
997 {0x000080b4, 0x00000000},
998 {0x000080b8, 0x00000000},
999 {0x000080bc, 0x00000000},
1000 {0x000080c0, 0x2a800000},
1001 {0x000080c4, 0x06900168},
1002 {0x000080c8, 0x13881c22},
1003 {0x000080cc, 0x01f40000},
1004 {0x000080d0, 0x00252500},
1005 {0x000080d4, 0x00a00000},
1006 {0x000080d8, 0x00400000},
1007 {0x000080dc, 0x00000000},
1008 {0x000080e0, 0xffffffff},
1009 {0x000080e4, 0x0000ffff},
1010 {0x000080e8, 0x3f3f3f3f},
1011 {0x000080ec, 0x00000000},
1012 {0x000080f0, 0x00000000},
1013 {0x000080f4, 0x00000000},
1014 {0x000080fc, 0x00020000},
1015 {0x00008100, 0x00000000},
1016 {0x00008108, 0x00000052},
1017 {0x0000810c, 0x00000000},
1018 {0x00008110, 0x00000000},
1019 {0x00008114, 0x000007ff},
1020 {0x00008118, 0x000000aa},
1021 {0x0000811c, 0x00003210},
1022 {0x00008124, 0x00000000},
1023 {0x00008128, 0x00000000},
1024 {0x0000812c, 0x00000000},
1025 {0x00008130, 0x00000000},
1026 {0x00008134, 0x00000000},
1027 {0x00008138, 0x00000000},
1028 {0x0000813c, 0x0000ffff},
1029 {0x00008144, 0xffffffff},
1030 {0x00008168, 0x00000000},
1031 {0x0000816c, 0x00000000},
1032 {0x00008170, 0x18486200},
1033 {0x00008174, 0x33332210},
1034 {0x00008178, 0x00000000},
1035 {0x0000817c, 0x00020000},
1036 {0x000081c0, 0x00000000},
1037 {0x000081c4, 0x33332210},
1038 {0x000081d4, 0x00000000},
1039 {0x000081ec, 0x00000000},
1040 {0x000081f0, 0x00000000},
1041 {0x000081f4, 0x00000000},
1042 {0x000081f8, 0x00000000},
1043 {0x000081fc, 0x00000000},
1044 {0x00008240, 0x00100000},
1045 {0x00008244, 0x0010f400},
1046 {0x00008248, 0x00000800},
1047 {0x0000824c, 0x0001e800},
1048 {0x00008250, 0x00000000},
1049 {0x00008254, 0x00000000},
1050 {0x00008258, 0x00000000},
1051 {0x0000825c, 0x40000000},
1052 {0x00008260, 0x00080922},
1053 {0x00008264, 0x9ca00010},
1054 {0x00008268, 0xffffffff},
1055 {0x0000826c, 0x0000ffff},
1056 {0x00008270, 0x00000000},
1057 {0x00008274, 0x40000000},
1058 {0x00008278, 0x003e4180},
1059 {0x0000827c, 0x00000004},
1060 {0x00008284, 0x0000002c},
1061 {0x00008288, 0x0000002c},
1062 {0x0000828c, 0x000000ff},
1063 {0x00008294, 0x00000000},
1064 {0x00008298, 0x00000000},
1065 {0x0000829c, 0x00000000},
1066 {0x00008300, 0x00000140},
1067 {0x00008314, 0x00000000},
1068 {0x0000831c, 0x0000010d},
1069 {0x00008328, 0x00000000},
1070 {0x0000832c, 0x00000007},
1071 {0x00008330, 0x00000302},
1072 {0x00008334, 0x00000700},
1073 {0x00008338, 0x00ff0000},
1074 {0x0000833c, 0x02400000},
1075 {0x00008340, 0x000107ff},
1076 {0x00008344, 0xa248105b},
1077 {0x00008348, 0x008f0000},
1078 {0x0000835c, 0x00000000},
1079 {0x00008360, 0xffffffff},
1080 {0x00008364, 0xffffffff},
1081 {0x00008368, 0x00000000},
1082 {0x00008370, 0x00000000},
1083 {0x00008374, 0x000000ff},
1084 {0x00008378, 0x00000000},
1085 {0x0000837c, 0x00000000},
1086 {0x00008380, 0xffffffff},
1087 {0x00008384, 0xffffffff},
1088 {0x00008390, 0xffffffff},
1089 {0x00008394, 0xffffffff},
1090 {0x00008398, 0x00000000},
1091 {0x0000839c, 0x00000000},
1092 {0x000083a0, 0x00000000},
1093 {0x000083a4, 0x0000fa14},
1094 {0x000083a8, 0x000f0c00},
1095 {0x000083ac, 0x33332210},
1096 {0x000083b0, 0x33332210},
1097 {0x000083b4, 0x33332210},
1098 {0x000083b8, 0x33332210},
1099 {0x000083bc, 0x00000000},
1100 {0x000083c0, 0x00000000},
1101 {0x000083c4, 0x00000000},
1102 {0x000083c8, 0x00000000},
1103 {0x000083cc, 0x00000200},
1104 {0x000083d0, 0x000301ff},
1105};
1106
1107static const u32 ar9485_1_1_baseband_core[][2] = {
1108 /* Addr allmodes */
1109 {0x00009800, 0xafe68e30},
1110 {0x00009804, 0xfd14e000},
1111 {0x00009808, 0x9c0a8f6b},
1112 {0x0000980c, 0x04800000},
1113 {0x00009814, 0x9280c00a},
1114 {0x00009818, 0x00000000},
1115 {0x0000981c, 0x00020028},
1116 {0x00009834, 0x5f3ca3de},
1117 {0x00009838, 0x0108ecff},
1118 {0x0000983c, 0x14750600},
1119 {0x00009880, 0x201fff00},
1120 {0x00009884, 0x00001042},
1121 {0x000098a4, 0x00200400},
1122 {0x000098b0, 0x52440bbe},
1123 {0x000098d0, 0x004b6a8e},
1124 {0x000098d4, 0x00000820},
1125 {0x000098dc, 0x00000000},
1126 {0x000098f0, 0x00000000},
1127 {0x000098f4, 0x00000000},
1128 {0x00009c04, 0x00000000},
1129 {0x00009c08, 0x03200000},
1130 {0x00009c0c, 0x00000000},
1131 {0x00009c10, 0x00000000},
1132 {0x00009c14, 0x00046384},
1133 {0x00009c18, 0x05b6b440},
1134 {0x00009c1c, 0x00b6b440},
1135 {0x00009d00, 0xc080a333},
1136 {0x00009d04, 0x40206c10},
1137 {0x00009d08, 0x009c4060},
1138 {0x00009d0c, 0x1883800a},
1139 {0x00009d10, 0x01834061},
1140 {0x00009d14, 0x00c00400},
1141 {0x00009d18, 0x00000000},
1142 {0x00009d1c, 0x00000000},
1143 {0x00009e08, 0x0038233c},
1144 {0x00009e24, 0x9927b515},
1145 {0x00009e28, 0x12ef0200},
1146 {0x00009e30, 0x06336f77},
1147 {0x00009e34, 0x6af6532f},
1148 {0x00009e38, 0x0cc80c00},
1149 {0x00009e40, 0x0d261820},
1150 {0x00009e4c, 0x00001004},
1151 {0x00009e50, 0x00ff03f1},
1152 {0x00009fc0, 0x80be4788},
1153 {0x00009fc4, 0x0001efb5},
1154 {0x00009fcc, 0x40000014},
1155 {0x0000a20c, 0x00000000},
1156 {0x0000a210, 0x00000000},
1157 {0x0000a220, 0x00000000},
1158 {0x0000a224, 0x00000000},
1159 {0x0000a228, 0x10002310},
1160 {0x0000a23c, 0x00000000},
1161 {0x0000a244, 0x0c000000},
1162 {0x0000a2a0, 0x00000001},
1163 {0x0000a2c0, 0x00000001},
1164 {0x0000a2c8, 0x00000000},
1165 {0x0000a2cc, 0x18c43433},
1166 {0x0000a2d4, 0x00000000},
1167 {0x0000a2dc, 0x00000000},
1168 {0x0000a2e0, 0x00000000},
1169 {0x0000a2e4, 0x00000000},
1170 {0x0000a2e8, 0x00000000},
1171 {0x0000a2ec, 0x00000000},
1172 {0x0000a2f0, 0x00000000},
1173 {0x0000a2f4, 0x00000000},
1174 {0x0000a2f8, 0x00000000},
1175 {0x0000a344, 0x00000000},
1176 {0x0000a34c, 0x00000000},
1177 {0x0000a350, 0x0000a000},
1178 {0x0000a364, 0x00000000},
1179 {0x0000a370, 0x00000000},
1180 {0x0000a390, 0x00000001},
1181 {0x0000a394, 0x00000444},
1182 {0x0000a398, 0x001f0e0f},
1183 {0x0000a39c, 0x0075393f},
1184 {0x0000a3a0, 0xb79f6427},
1185 {0x0000a3a4, 0x000000ff},
1186 {0x0000a3a8, 0x3b3b3b3b},
1187 {0x0000a3ac, 0x2f2f2f2f},
1188 {0x0000a3c0, 0x20202020},
1189 {0x0000a3c4, 0x22222220},
1190 {0x0000a3c8, 0x20200020},
1191 {0x0000a3cc, 0x20202020},
1192 {0x0000a3d0, 0x20202020},
1193 {0x0000a3d4, 0x20202020},
1194 {0x0000a3d8, 0x20202020},
1195 {0x0000a3dc, 0x20202020},
1196 {0x0000a3e0, 0x20202020},
1197 {0x0000a3e4, 0x20202020},
1198 {0x0000a3e8, 0x20202020},
1199 {0x0000a3ec, 0x20202020},
1200 {0x0000a3f0, 0x00000000},
1201 {0x0000a3f4, 0x00000006},
1202 {0x0000a3f8, 0x0cdbd380},
1203 {0x0000a3fc, 0x000f0f01},
1204 {0x0000a400, 0x8fa91f01},
1205 {0x0000a404, 0x00000000},
1206 {0x0000a408, 0x0e79e5c6},
1207 {0x0000a40c, 0x00820820},
1208 {0x0000a414, 0x1ce739cf},
1209 {0x0000a418, 0x2d0019ce},
1210 {0x0000a41c, 0x1ce739ce},
1211 {0x0000a420, 0x000001ce},
1212 {0x0000a424, 0x1ce739ce},
1213 {0x0000a428, 0x000001ce},
1214 {0x0000a42c, 0x1ce739ce},
1215 {0x0000a430, 0x1ce739ce},
1216 {0x0000a434, 0x00000000},
1217 {0x0000a438, 0x00001801},
1218 {0x0000a43c, 0x00000000},
1219 {0x0000a440, 0x00000000},
1220 {0x0000a444, 0x00000000},
1221 {0x0000a448, 0x04000000},
1222 {0x0000a44c, 0x00000001},
1223 {0x0000a450, 0x00010000},
1224 {0x0000a5c4, 0xbfad9d74},
1225 {0x0000a5c8, 0x0048060a},
1226 {0x0000a5cc, 0x00000637},
1227 {0x0000a760, 0x03020100},
1228 {0x0000a764, 0x09080504},
1229 {0x0000a768, 0x0d0c0b0a},
1230 {0x0000a76c, 0x13121110},
1231 {0x0000a770, 0x31301514},
1232 {0x0000a774, 0x35343332},
1233 {0x0000a778, 0x00000036},
1234 {0x0000a780, 0x00000838},
1235 {0x0000a7c0, 0x00000000},
1236 {0x0000a7c4, 0xfffffffc},
1237 {0x0000a7c8, 0x00000000},
1238 {0x0000a7cc, 0x00000000},
1239 {0x0000a7d0, 0x00000000},
1240 {0x0000a7d4, 0x00000004},
1241 {0x0000a7dc, 0x00000000},
1242};
1243
1244static const u32 ar9485Common_1_1[][2] = {
1245 /* Addr allmodes */
1246 {0x00007010, 0x00000022},
1247 {0x00007020, 0x00000000},
1248 {0x00007034, 0x00000002},
1249 {0x00007038, 0x000004c2},
1250};
1251
1252static const u32 ar9485_1_1_baseband_postamble[][5] = {
1253 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1254 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
1255 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
1256 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
1257 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
1258 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
1259 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
1260 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
1261 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
1262 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
1263 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
1264 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
1265 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
1266 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1267 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
1268 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
1269 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
1270 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
1271 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
1272 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
1273 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
1274 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
1275 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
1276 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
1277 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
1278 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
1279 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
1280 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
1281 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
1282 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
1283 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
1284 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
1285 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
1286 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
1287 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1288 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1289 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
1290 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
1291 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
1292 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1293 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
1294 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1295};
1296
1297static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
1298 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1299 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1300 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1301 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1302 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
1303 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
1304 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
1305 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
1306 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
1307 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
1308 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
1309 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
1310 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
1311 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
1312 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
1313 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
1314 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
1315 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
1316 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1317 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1318 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1319 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1320 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1321 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1322 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1323 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1324 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1325 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1326 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1327 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1328 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1329 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1330 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1331 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1332 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1333 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1334 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1335 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1336 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1337 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1338 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1339 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1340 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1341 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1342 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1343 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1344 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1345 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1346 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1347 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1348 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1349 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1350 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1351 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1352 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1353 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1354 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1355 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1356 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1357 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1358 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1359 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1360 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1361 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1362 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1363 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1364 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1365 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1366 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1367 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1368};
1369
1370static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
1371 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1372 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1373 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1374 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1375 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
1376 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
1377 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
1378 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
1379 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
1380 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
1381 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
1382 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
1383 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
1384 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
1385 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
1386 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
1387 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
1388 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
1389 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1390 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1391 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1392 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1393 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1394 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1395 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1396 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1397 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1398 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1399 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1400 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1401 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1402 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1403 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1404 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1405 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1406 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1407 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1408 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1409 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1410 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1411 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1412 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1413 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1414 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1415 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1416 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1417 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1418 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1419 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1420 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1421 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1422 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1423 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1424 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1425 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1426 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1427 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1428 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1429 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1430 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1431 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1432 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1433 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1434 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1435 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1436 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1437 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1438 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1439 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1440 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1441};
1442
1443static const u32 ar9485_1_1_radio_postamble[][2] = {
1444 /* Addr allmodes */
1445 {0x0001609c, 0x0b283f31},
1446 {0x000160ac, 0x24611800},
1447 {0x000160b0, 0x03284f3e},
1448 {0x0001610c, 0x00170000},
1449 {0x00016140, 0x10804008},
1450};
1451
1452static const u32 ar9485_1_1_mac_postamble[][5] = {
1453 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1454 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
1455 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
1456 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
1457 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
1458 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
1459 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
1460 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
1461 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
1462};
1463
1464static const u32 ar9485_1_1_radio_core[][2] = {
1465 /* Addr allmodes */
1466 {0x00016000, 0x36db6db6},
1467 {0x00016004, 0x6db6db40},
1468 {0x00016008, 0x73800000},
1469 {0x0001600c, 0x00000000},
1470 {0x00016040, 0x7f80fff8},
1471 {0x0001604c, 0x000f0278},
1472 {0x00016050, 0x4db6db8c},
1473 {0x00016054, 0x6db60000},
1474 {0x00016080, 0x00080000},
1475 {0x00016084, 0x0e48048c},
1476 {0x00016088, 0x14214514},
1477 {0x0001608c, 0x119f081e},
1478 {0x00016090, 0x24926490},
1479 {0x00016098, 0xd28b3330},
1480 {0x000160a0, 0xc2108ffe},
1481 {0x000160a4, 0x812fc370},
1482 {0x000160a8, 0x423c8000},
1483 {0x000160b4, 0x92480040},
1484 {0x000160c0, 0x006db6db},
1485 {0x000160c4, 0x0186db60},
1486 {0x000160c8, 0x6db6db6c},
1487 {0x000160cc, 0x6de6fbe0},
1488 {0x000160d0, 0xf7dfcf3c},
1489 {0x00016100, 0x04cb0001},
1490 {0x00016104, 0xfff80015},
1491 {0x00016108, 0x00080010},
1492 {0x00016144, 0x01884080},
1493 {0x00016148, 0x00008040},
1494 {0x00016240, 0x08400000},
1495 {0x00016244, 0x1bf90f00},
1496 {0x00016248, 0x00000000},
1497 {0x0001624c, 0x00000000},
1498 {0x00016280, 0x01000015},
1499 {0x00016284, 0x00d30000},
1500 {0x00016288, 0x00318000},
1501 {0x0001628c, 0x50000000},
1502 {0x00016290, 0x4b96210f},
1503 {0x00016380, 0x00000000},
1504 {0x00016384, 0x00000000},
1505 {0x00016388, 0x00800700},
1506 {0x0001638c, 0x00800700},
1507 {0x00016390, 0x00800700},
1508 {0x00016394, 0x00000000},
1509 {0x00016398, 0x00000000},
1510 {0x0001639c, 0x00000000},
1511 {0x000163a0, 0x00000001},
1512 {0x000163a4, 0x00000001},
1513 {0x000163a8, 0x00000000},
1514 {0x000163ac, 0x00000000},
1515 {0x000163b0, 0x00000000},
1516 {0x000163b4, 0x00000000},
1517 {0x000163b8, 0x00000000},
1518 {0x000163bc, 0x00000000},
1519 {0x000163c0, 0x000000a0},
1520 {0x000163c4, 0x000c0000},
1521 {0x000163c8, 0x14021402},
1522 {0x000163cc, 0x00001402},
1523 {0x000163d0, 0x00000000},
1524 {0x000163d4, 0x00000000},
1525 {0x00016c40, 0x13188278},
1526 {0x00016c44, 0x12000000},
1527};
1528
1529static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
1530 /* Addr allmodes */
1531 {0x00018c00, 0x10052e5e},
1532 {0x00018c04, 0x000801d8},
1533 {0x00018c08, 0x0000080c},
1534};
1535
1536static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
1537 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1538 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1539 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1540 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1541 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
1542 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
1543 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
1544 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
1545 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
1546 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
1547 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
1548 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
1549 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
1550 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
1551 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
1552 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
1553 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
1554 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
1555 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1556 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1557 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1558 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1559 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1560 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1561 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1562 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1563 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1564 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1565 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1566 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1567 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1568 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1569 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1570 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1571 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1572 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1573 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1574 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1575 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1576 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1577 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1578 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1579 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1580 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1581 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1582 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1583 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1584 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1585 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1586 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1587 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1588 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1589 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1590 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1591 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1592 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1593 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1594 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1595 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1596 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1597 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1598 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1599 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1600 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1601 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1602 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1603 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1604 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1605 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1606 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1607};
1608
1609static const u32 ar9485_1_1[][2] = {
1610 /* Addr allmodes */
1611 {0x0000a580, 0x00000000},
1612 {0x0000a584, 0x00000000},
1613 {0x0000a588, 0x00000000},
1614 {0x0000a58c, 0x00000000},
1615 {0x0000a590, 0x00000000},
1616 {0x0000a594, 0x00000000},
1617 {0x0000a598, 0x00000000},
1618 {0x0000a59c, 0x00000000},
1619 {0x0000a5a0, 0x00000000},
1620 {0x0000a5a4, 0x00000000},
1621 {0x0000a5a8, 0x00000000},
1622 {0x0000a5ac, 0x00000000},
1623 {0x0000a5b0, 0x00000000},
1624 {0x0000a5b4, 0x00000000},
1625 {0x0000a5b8, 0x00000000},
1626 {0x0000a5bc, 0x00000000},
1627};
1628
1629static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
1630 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1631 {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
1632 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1633 {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
1634 {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
1635 {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
1636 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
1637 {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
1638 {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
1639 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
1640 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
1641 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
1642 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
1643 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
1644 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
1645 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
1646 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
1647 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
1648 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1649 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1650 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1651 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1652 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1653 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1654 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1655 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1656 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1657 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1658 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1659 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1660 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1661 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1662 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1663 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1664 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1665 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1666 {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1667 {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1668 {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1669 {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1670 {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1671 {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1672 {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1673 {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1674 {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1675 {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1676 {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1677 {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
1678 {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
1679 {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
1680 {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1681 {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1682 {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1683 {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1684 {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1685 {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1686 {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1687 {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1688 {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1689 {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1690 {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1691 {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1692 {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1693 {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1694 {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1695 {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1696 {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1697 {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1698 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1699 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1700};
1701
1702static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
1703 /* Addr allmodes */
1704 {0x00018c00, 0x10013e5e},
1705 {0x00018c04, 0x000801d8},
1706 {0x00018c08, 0x0000080c},
1707};
1708
1709static const u32 ar9485_1_1_soc_preamble[][2] = {
1710 /* Addr allmodes */
1711 {0x00004014, 0xba280400},
1712 {0x00004090, 0x00aa10aa},
1713 {0x000040a4, 0x00a0c9c9},
1714 {0x00007010, 0x00000022},
1715 {0x00007020, 0x00000000},
1716 {0x00007034, 0x00000002},
1717 {0x00007038, 0x000004c2},
1718 {0x00007048, 0x00000002},
1719};
1720
1721static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
1722 /* Addr allmodes */
1723 {0x0000a398, 0x00000000},
1724 {0x0000a39c, 0x6f7f0301},
1725 {0x0000a3a0, 0xca9228ee},
1726};
1727
1728static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
1729 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1730 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1731 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1732 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1733 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
1734 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
1735 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
1736 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
1737 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
1738 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
1739 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
1740 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
1741 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
1742 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
1743 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
1744 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
1745 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
1746 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
1747 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1748 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1749 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1750 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1751 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1752 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1753 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1754 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1755 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1756 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1757 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1758 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1759 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1760 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1761 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1762 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1763 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1764 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1765 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1766 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1767 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1768 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1769 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1770 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1771 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1772 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1773 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1774 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1775 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1776 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1777 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1778 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1779 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1780 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1781 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1782 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1783 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1784 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1785 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1786 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1787 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1788 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1789 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1790 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1791 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1792 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1793 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1794 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1795 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1796 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1797 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1798 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1799};
1800
1801static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
1802 /* Addr 5G_HT2 5G_HT40 */
1803 {0x00009e00, 0x03721821, 0x03721821},
1804 {0x0000a230, 0x0000400b, 0x00004016},
1805 {0x0000a254, 0x00000898, 0x00001130},
1806};
1807
1808static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
1809 /* Addr allmodes */
1810 {0x00018c00, 0x10012e5e},
1811 {0x00018c04, 0x000801d8},
1812 {0x00018c08, 0x0000080c},
1813};
1814
1815static const u32 ar9485_common_rx_gain_1_1[][2] = {
1816 /* Addr allmodes */
1817 {0x0000a000, 0x00010000},
1818 {0x0000a004, 0x00030002},
1819 {0x0000a008, 0x00050004},
1820 {0x0000a00c, 0x00810080},
1821 {0x0000a010, 0x01800082},
1822 {0x0000a014, 0x01820181},
1823 {0x0000a018, 0x01840183},
1824 {0x0000a01c, 0x01880185},
1825 {0x0000a020, 0x018a0189},
1826 {0x0000a024, 0x02850284},
1827 {0x0000a028, 0x02890288},
1828 {0x0000a02c, 0x03850384},
1829 {0x0000a030, 0x03890388},
1830 {0x0000a034, 0x038b038a},
1831 {0x0000a038, 0x038d038c},
1832 {0x0000a03c, 0x03910390},
1833 {0x0000a040, 0x03930392},
1834 {0x0000a044, 0x03950394},
1835 {0x0000a048, 0x00000396},
1836 {0x0000a04c, 0x00000000},
1837 {0x0000a050, 0x00000000},
1838 {0x0000a054, 0x00000000},
1839 {0x0000a058, 0x00000000},
1840 {0x0000a05c, 0x00000000},
1841 {0x0000a060, 0x00000000},
1842 {0x0000a064, 0x00000000},
1843 {0x0000a068, 0x00000000},
1844 {0x0000a06c, 0x00000000},
1845 {0x0000a070, 0x00000000},
1846 {0x0000a074, 0x00000000},
1847 {0x0000a078, 0x00000000},
1848 {0x0000a07c, 0x00000000},
1849 {0x0000a080, 0x28282828},
1850 {0x0000a084, 0x28282828},
1851 {0x0000a088, 0x28282828},
1852 {0x0000a08c, 0x28282828},
1853 {0x0000a090, 0x28282828},
1854 {0x0000a094, 0x21212128},
1855 {0x0000a098, 0x171c1c1c},
1856 {0x0000a09c, 0x02020212},
1857 {0x0000a0a0, 0x00000202},
1858 {0x0000a0a4, 0x00000000},
1859 {0x0000a0a8, 0x00000000},
1860 {0x0000a0ac, 0x00000000},
1861 {0x0000a0b0, 0x00000000},
1862 {0x0000a0b4, 0x00000000},
1863 {0x0000a0b8, 0x00000000},
1864 {0x0000a0bc, 0x00000000},
1865 {0x0000a0c0, 0x001f0000},
1866 {0x0000a0c4, 0x111f1100},
1867 {0x0000a0c8, 0x111d111e},
1868 {0x0000a0cc, 0x111b111c},
1869 {0x0000a0d0, 0x22032204},
1870 {0x0000a0d4, 0x22012202},
1871 {0x0000a0d8, 0x221f2200},
1872 {0x0000a0dc, 0x221d221e},
1873 {0x0000a0e0, 0x33013302},
1874 {0x0000a0e4, 0x331f3300},
1875 {0x0000a0e8, 0x4402331e},
1876 {0x0000a0ec, 0x44004401},
1877 {0x0000a0f0, 0x441e441f},
1878 {0x0000a0f4, 0x55015502},
1879 {0x0000a0f8, 0x551f5500},
1880 {0x0000a0fc, 0x6602551e},
1881 {0x0000a100, 0x66006601},
1882 {0x0000a104, 0x661e661f},
1883 {0x0000a108, 0x7703661d},
1884 {0x0000a10c, 0x77017702},
1885 {0x0000a110, 0x00007700},
1886 {0x0000a114, 0x00000000},
1887 {0x0000a118, 0x00000000},
1888 {0x0000a11c, 0x00000000},
1889 {0x0000a120, 0x00000000},
1890 {0x0000a124, 0x00000000},
1891 {0x0000a128, 0x00000000},
1892 {0x0000a12c, 0x00000000},
1893 {0x0000a130, 0x00000000},
1894 {0x0000a134, 0x00000000},
1895 {0x0000a138, 0x00000000},
1896 {0x0000a13c, 0x00000000},
1897 {0x0000a140, 0x001f0000},
1898 {0x0000a144, 0x111f1100},
1899 {0x0000a148, 0x111d111e},
1900 {0x0000a14c, 0x111b111c},
1901 {0x0000a150, 0x22032204},
1902 {0x0000a154, 0x22012202},
1903 {0x0000a158, 0x221f2200},
1904 {0x0000a15c, 0x221d221e},
1905 {0x0000a160, 0x33013302},
1906 {0x0000a164, 0x331f3300},
1907 {0x0000a168, 0x4402331e},
1908 {0x0000a16c, 0x44004401},
1909 {0x0000a170, 0x441e441f},
1910 {0x0000a174, 0x55015502},
1911 {0x0000a178, 0x551f5500},
1912 {0x0000a17c, 0x6602551e},
1913 {0x0000a180, 0x66006601},
1914 {0x0000a184, 0x661e661f},
1915 {0x0000a188, 0x7703661d},
1916 {0x0000a18c, 0x77017702},
1917 {0x0000a190, 0x00007700},
1918 {0x0000a194, 0x00000000},
1919 {0x0000a198, 0x00000000},
1920 {0x0000a19c, 0x00000000},
1921 {0x0000a1a0, 0x00000000},
1922 {0x0000a1a4, 0x00000000},
1923 {0x0000a1a8, 0x00000000},
1924 {0x0000a1ac, 0x00000000},
1925 {0x0000a1b0, 0x00000000},
1926 {0x0000a1b4, 0x00000000},
1927 {0x0000a1b8, 0x00000000},
1928 {0x0000a1bc, 0x00000000},
1929 {0x0000a1c0, 0x00000000},
1930 {0x0000a1c4, 0x00000000},
1931 {0x0000a1c8, 0x00000000},
1932 {0x0000a1cc, 0x00000000},
1933 {0x0000a1d0, 0x00000000},
1934 {0x0000a1d4, 0x00000000},
1935 {0x0000a1d8, 0x00000000},
1936 {0x0000a1dc, 0x00000000},
1937 {0x0000a1e0, 0x00000000},
1938 {0x0000a1e4, 0x00000000},
1939 {0x0000a1e8, 0x00000000},
1940 {0x0000a1ec, 0x00000000},
1941 {0x0000a1f0, 0x00000396},
1942 {0x0000a1f4, 0x00000396},
1943 {0x0000a1f8, 0x00000396},
1944 {0x0000a1fc, 0x00000296},
1945};
1946
1947static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
1948 /* Addr allmodes */
1949 {0x00018c00, 0x10053e5e},
1950 {0x00018c04, 0x000801d8},
1951 {0x00018c08, 0x0000080c},
1952};
1953
1954static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
1955 /* Addr allmodes */
1956 {0x0000a000, 0x00060005},
1957 {0x0000a004, 0x00810080},
1958 {0x0000a008, 0x00830082},
1959 {0x0000a00c, 0x00850084},
1960 {0x0000a010, 0x01820181},
1961 {0x0000a014, 0x01840183},
1962 {0x0000a018, 0x01880185},
1963 {0x0000a01c, 0x018a0189},
1964 {0x0000a020, 0x02850284},
1965 {0x0000a024, 0x02890288},
1966 {0x0000a028, 0x028b028a},
1967 {0x0000a02c, 0x03850384},
1968 {0x0000a030, 0x03890388},
1969 {0x0000a034, 0x038b038a},
1970 {0x0000a038, 0x038d038c},
1971 {0x0000a03c, 0x03910390},
1972 {0x0000a040, 0x03930392},
1973 {0x0000a044, 0x03950394},
1974 {0x0000a048, 0x00000396},
1975 {0x0000a04c, 0x00000000},
1976 {0x0000a050, 0x00000000},
1977 {0x0000a054, 0x00000000},
1978 {0x0000a058, 0x00000000},
1979 {0x0000a05c, 0x00000000},
1980 {0x0000a060, 0x00000000},
1981 {0x0000a064, 0x00000000},
1982 {0x0000a068, 0x00000000},
1983 {0x0000a06c, 0x00000000},
1984 {0x0000a070, 0x00000000},
1985 {0x0000a074, 0x00000000},
1986 {0x0000a078, 0x00000000},
1987 {0x0000a07c, 0x00000000},
1988 {0x0000a080, 0x28282828},
1989 {0x0000a084, 0x28282828},
1990 {0x0000a088, 0x28282828},
1991 {0x0000a08c, 0x28282828},
1992 {0x0000a090, 0x28282828},
1993 {0x0000a094, 0x24242428},
1994 {0x0000a098, 0x171e1e1e},
1995 {0x0000a09c, 0x02020b0b},
1996 {0x0000a0a0, 0x02020202},
1997 {0x0000a0a4, 0x00000000},
1998 {0x0000a0a8, 0x00000000},
1999 {0x0000a0ac, 0x00000000},
2000 {0x0000a0b0, 0x00000000},
2001 {0x0000a0b4, 0x00000000},
2002 {0x0000a0b8, 0x00000000},
2003 {0x0000a0bc, 0x00000000},
2004 {0x0000a0c0, 0x22072208},
2005 {0x0000a0c4, 0x22052206},
2006 {0x0000a0c8, 0x22032204},
2007 {0x0000a0cc, 0x22012202},
2008 {0x0000a0d0, 0x221f2200},
2009 {0x0000a0d4, 0x221d221e},
2010 {0x0000a0d8, 0x33023303},
2011 {0x0000a0dc, 0x33003301},
2012 {0x0000a0e0, 0x331e331f},
2013 {0x0000a0e4, 0x4402331d},
2014 {0x0000a0e8, 0x44004401},
2015 {0x0000a0ec, 0x441e441f},
2016 {0x0000a0f0, 0x55025503},
2017 {0x0000a0f4, 0x55005501},
2018 {0x0000a0f8, 0x551e551f},
2019 {0x0000a0fc, 0x6602551d},
2020 {0x0000a100, 0x66006601},
2021 {0x0000a104, 0x661e661f},
2022 {0x0000a108, 0x7703661d},
2023 {0x0000a10c, 0x77017702},
2024 {0x0000a110, 0x00007700},
2025 {0x0000a114, 0x00000000},
2026 {0x0000a118, 0x00000000},
2027 {0x0000a11c, 0x00000000},
2028 {0x0000a120, 0x00000000},
2029 {0x0000a124, 0x00000000},
2030 {0x0000a128, 0x00000000},
2031 {0x0000a12c, 0x00000000},
2032 {0x0000a130, 0x00000000},
2033 {0x0000a134, 0x00000000},
2034 {0x0000a138, 0x00000000},
2035 {0x0000a13c, 0x00000000},
2036 {0x0000a140, 0x001f0000},
2037 {0x0000a144, 0x111f1100},
2038 {0x0000a148, 0x111d111e},
2039 {0x0000a14c, 0x111b111c},
2040 {0x0000a150, 0x22032204},
2041 {0x0000a154, 0x22012202},
2042 {0x0000a158, 0x221f2200},
2043 {0x0000a15c, 0x221d221e},
2044 {0x0000a160, 0x33013302},
2045 {0x0000a164, 0x331f3300},
2046 {0x0000a168, 0x4402331e},
2047 {0x0000a16c, 0x44004401},
2048 {0x0000a170, 0x441e441f},
2049 {0x0000a174, 0x55015502},
2050 {0x0000a178, 0x551f5500},
2051 {0x0000a17c, 0x6602551e},
2052 {0x0000a180, 0x66006601},
2053 {0x0000a184, 0x661e661f},
2054 {0x0000a188, 0x7703661d},
2055 {0x0000a18c, 0x77017702},
2056 {0x0000a190, 0x00007700},
2057 {0x0000a194, 0x00000000},
2058 {0x0000a198, 0x00000000},
2059 {0x0000a19c, 0x00000000},
2060 {0x0000a1a0, 0x00000000},
2061 {0x0000a1a4, 0x00000000},
2062 {0x0000a1a8, 0x00000000},
2063 {0x0000a1ac, 0x00000000},
2064 {0x0000a1b0, 0x00000000},
2065 {0x0000a1b4, 0x00000000},
2066 {0x0000a1b8, 0x00000000},
2067 {0x0000a1bc, 0x00000000},
2068 {0x0000a1c0, 0x00000000},
2069 {0x0000a1c4, 0x00000000},
2070 {0x0000a1c8, 0x00000000},
2071 {0x0000a1cc, 0x00000000},
2072 {0x0000a1d0, 0x00000000},
2073 {0x0000a1d4, 0x00000000},
2074 {0x0000a1d8, 0x00000000},
2075 {0x0000a1dc, 0x00000000},
2076 {0x0000a1e0, 0x00000000},
2077 {0x0000a1e4, 0x00000000},
2078 {0x0000a1e8, 0x00000000},
2079 {0x0000a1ec, 0x00000000},
2080 {0x0000a1f0, 0x00000396},
2081 {0x0000a1f4, 0x00000396},
2082 {0x0000a1f8, 0x00000396},
2083 {0x0000a1fc, 0x00000296},
2084};
2085
943#endif 2086#endif
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1a7fa6ea4cf5..099bd4183ad0 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -92,9 +92,9 @@ struct ath_config {
92 * @BUF_XRETRY: To denote excessive retries of the buffer 92 * @BUF_XRETRY: To denote excessive retries of the buffer
93 */ 93 */
94enum buffer_type { 94enum buffer_type {
95 BUF_AMPDU = BIT(2), 95 BUF_AMPDU = BIT(0),
96 BUF_AGGR = BIT(3), 96 BUF_AGGR = BIT(1),
97 BUF_XRETRY = BIT(5), 97 BUF_XRETRY = BIT(2),
98}; 98};
99 99
100#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) 100#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
@@ -134,7 +134,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
134 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ 134 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
135 WME_AC_VO) 135 WME_AC_VO)
136 136
137#define ADDBA_EXCHANGE_ATTEMPTS 10
138#define ATH_AGGR_DELIM_SZ 4 137#define ATH_AGGR_DELIM_SZ 4
139#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ 138#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
140/* number of delimiters for encryption padding */ 139/* number of delimiters for encryption padding */
@@ -181,7 +180,8 @@ enum ATH_AGGR_STATUS {
181 180
182#define ATH_TXFIFO_DEPTH 8 181#define ATH_TXFIFO_DEPTH 8
183struct ath_txq { 182struct ath_txq {
184 u32 axq_qnum; 183 int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
184 u32 axq_qnum; /* ath9k hardware queue number */
185 u32 *axq_link; 185 u32 *axq_link;
186 struct list_head axq_q; 186 struct list_head axq_q;
187 spinlock_t axq_lock; 187 spinlock_t axq_lock;
@@ -231,7 +231,6 @@ struct ath_buf {
231 bool bf_stale; 231 bool bf_stale;
232 u16 bf_flags; 232 u16 bf_flags;
233 struct ath_buf_state bf_state; 233 struct ath_buf_state bf_state;
234 struct ath_wiphy *aphy;
235}; 234};
236 235
237struct ath_atx_tid { 236struct ath_atx_tid {
@@ -252,7 +251,10 @@ struct ath_atx_tid {
252}; 251};
253 252
254struct ath_node { 253struct ath_node {
255 struct ath_common *common; 254#ifdef CONFIG_ATH9K_DEBUGFS
255 struct list_head list; /* for sc->nodes */
256 struct ieee80211_sta *sta; /* station struct we're part of */
257#endif
256 struct ath_atx_tid tid[WME_NUM_TID]; 258 struct ath_atx_tid tid[WME_NUM_TID];
257 struct ath_atx_ac ac[WME_NUM_AC]; 259 struct ath_atx_ac ac[WME_NUM_AC];
258 u16 maxampdu; 260 u16 maxampdu;
@@ -275,6 +277,11 @@ struct ath_tx_control {
275#define ATH_TX_XRETRY 0x02 277#define ATH_TX_XRETRY 0x02
276#define ATH_TX_BAR 0x04 278#define ATH_TX_BAR 0x04
277 279
280/**
281 * @txq_map: Index is mac80211 queue number. This is
282 * not necessarily the same as the hardware queue number
283 * (axq_qnum).
284 */
278struct ath_tx { 285struct ath_tx {
279 u16 seq_no; 286 u16 seq_no;
280 u32 txqsetup; 287 u32 txqsetup;
@@ -301,6 +308,8 @@ struct ath_rx {
301 struct ath_descdma rxdma; 308 struct ath_descdma rxdma;
302 struct ath_buf *rx_bufptr; 309 struct ath_buf *rx_bufptr;
303 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; 310 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
311
312 struct sk_buff *frag;
304}; 313};
305 314
306int ath_startrecv(struct ath_softc *sc); 315int ath_startrecv(struct ath_softc *sc);
@@ -337,10 +346,10 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
337 346
338struct ath_vif { 347struct ath_vif {
339 int av_bslot; 348 int av_bslot;
349 bool is_bslot_active;
340 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ 350 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
341 enum nl80211_iftype av_opmode; 351 enum nl80211_iftype av_opmode;
342 struct ath_buf *av_bcbuf; 352 struct ath_buf *av_bcbuf;
343 struct ath_tx_control av_btxctl;
344 u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */ 353 u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
345}; 354};
346 355
@@ -360,7 +369,7 @@ struct ath_vif {
360#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 369#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
361 370
362struct ath_beacon_config { 371struct ath_beacon_config {
363 u16 beacon_interval; 372 int beacon_interval;
364 u16 listen_interval; 373 u16 listen_interval;
365 u16 dtim_period; 374 u16 dtim_period;
366 u16 bmiss_timeout; 375 u16 bmiss_timeout;
@@ -379,7 +388,6 @@ struct ath_beacon {
379 u32 ast_be_xmit; 388 u32 ast_be_xmit;
380 u64 bc_tstamp; 389 u64 bc_tstamp;
381 struct ieee80211_vif *bslot[ATH_BCBUF]; 390 struct ieee80211_vif *bslot[ATH_BCBUF];
382 struct ath_wiphy *bslot_aphy[ATH_BCBUF];
383 int slottime; 391 int slottime;
384 int slotupdate; 392 int slotupdate;
385 struct ath9k_tx_queue_info beacon_qi; 393 struct ath9k_tx_queue_info beacon_qi;
@@ -390,9 +398,10 @@ struct ath_beacon {
390 398
391void ath_beacon_tasklet(unsigned long data); 399void ath_beacon_tasklet(unsigned long data);
392void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif); 400void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
393int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif); 401int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
394void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp); 402void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
395int ath_beaconq_config(struct ath_softc *sc); 403int ath_beaconq_config(struct ath_softc *sc);
404void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
396 405
397/*******/ 406/*******/
398/* ANI */ 407/* ANI */
@@ -439,26 +448,21 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
439 448
440#define ATH_LED_PIN_DEF 1 449#define ATH_LED_PIN_DEF 1
441#define ATH_LED_PIN_9287 8 450#define ATH_LED_PIN_9287 8
442#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */ 451#define ATH_LED_PIN_9485 6
443#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
444
445enum ath_led_type {
446 ATH_LED_RADIO,
447 ATH_LED_ASSOC,
448 ATH_LED_TX,
449 ATH_LED_RX
450};
451
452struct ath_led {
453 struct ath_softc *sc;
454 struct led_classdev led_cdev;
455 enum ath_led_type led_type;
456 char name[32];
457 bool registered;
458};
459 452
453#ifdef CONFIG_MAC80211_LEDS
460void ath_init_leds(struct ath_softc *sc); 454void ath_init_leds(struct ath_softc *sc);
461void ath_deinit_leds(struct ath_softc *sc); 455void ath_deinit_leds(struct ath_softc *sc);
456#else
457static inline void ath_init_leds(struct ath_softc *sc)
458{
459}
460
461static inline void ath_deinit_leds(struct ath_softc *sc)
462{
463}
464#endif
465
462 466
463/* Antenna diversity/combining */ 467/* Antenna diversity/combining */
464#define ATH_ANT_RX_CURRENT_SHIFT 4 468#define ATH_ANT_RX_CURRENT_SHIFT 4
@@ -527,7 +531,6 @@ struct ath_ant_comb {
527#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ 531#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
528#define ATH_MAX_SW_RETRIES 10 532#define ATH_MAX_SW_RETRIES 10
529#define ATH_CHAN_MAX 255 533#define ATH_CHAN_MAX 255
530#define IEEE80211_WEP_NKID 4 /* number of key ids */
531 534
532#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 535#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
533#define ATH_RATE_DUMMY_MARKER 0 536#define ATH_RATE_DUMMY_MARKER 0
@@ -555,27 +558,28 @@ struct ath_ant_comb {
555#define PS_WAIT_FOR_TX_ACK BIT(3) 558#define PS_WAIT_FOR_TX_ACK BIT(3)
556#define PS_BEACON_SYNC BIT(4) 559#define PS_BEACON_SYNC BIT(4)
557 560
558struct ath_wiphy;
559struct ath_rate_table; 561struct ath_rate_table;
560 562
563struct ath9k_vif_iter_data {
564 const u8 *hw_macaddr; /* phy's hardware address, set
565 * before starting iteration for
566 * valid bssid mask.
567 */
568 u8 mask[ETH_ALEN]; /* bssid mask */
569 int naps; /* number of AP vifs */
570 int nmeshes; /* number of mesh vifs */
571 int nstations; /* number of station vifs */
572 int nwds; /* number of nwd vifs */
573 int nadhocs; /* number of adhoc vifs */
574 int nothers; /* number of vifs not specified above. */
575};
576
561struct ath_softc { 577struct ath_softc {
562 struct ieee80211_hw *hw; 578 struct ieee80211_hw *hw;
563 struct device *dev; 579 struct device *dev;
564 580
565 spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
566 struct ath_wiphy *pri_wiphy;
567 struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
568 * have NULL entries */
569 int num_sec_wiphy; /* number of sec_wiphy pointers in the array */
570 int chan_idx; 581 int chan_idx;
571 int chan_is_ht; 582 int chan_is_ht;
572 struct ath_wiphy *next_wiphy;
573 struct work_struct chan_work;
574 int wiphy_select_failures;
575 unsigned long wiphy_select_first_fail;
576 struct delayed_work wiphy_work;
577 unsigned long wiphy_scheduler_int;
578 int wiphy_scheduler_index;
579 struct survey_info *cur_survey; 583 struct survey_info *cur_survey;
580 struct survey_info survey[ATH9K_NUM_CHANNELS]; 584 struct survey_info survey[ATH9K_NUM_CHANNELS];
581 585
@@ -592,14 +596,16 @@ struct ath_softc {
592 struct work_struct hw_check_work; 596 struct work_struct hw_check_work;
593 struct completion paprd_complete; 597 struct completion paprd_complete;
594 598
599 unsigned int hw_busy_count;
600
595 u32 intrstatus; 601 u32 intrstatus;
596 u32 sc_flags; /* SC_OP_* */ 602 u32 sc_flags; /* SC_OP_* */
597 u16 ps_flags; /* PS_* */ 603 u16 ps_flags; /* PS_* */
598 u16 curtxpow; 604 u16 curtxpow;
599 u8 nbcnvifs;
600 u16 nvifs;
601 bool ps_enabled; 605 bool ps_enabled;
602 bool ps_idle; 606 bool ps_idle;
607 short nbcnvifs;
608 short nvifs;
603 unsigned long ps_usecount; 609 unsigned long ps_usecount;
604 610
605 struct ath_config config; 611 struct ath_config config;
@@ -608,23 +614,24 @@ struct ath_softc {
608 struct ath_beacon beacon; 614 struct ath_beacon beacon;
609 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 615 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
610 616
611 struct ath_led radio_led; 617#ifdef CONFIG_MAC80211_LEDS
612 struct ath_led assoc_led; 618 bool led_registered;
613 struct ath_led tx_led; 619 char led_name[32];
614 struct ath_led rx_led; 620 struct led_classdev led_cdev;
615 struct delayed_work ath_led_blink_work; 621#endif
616 int led_on_duration;
617 int led_off_duration;
618 int led_on_cnt;
619 int led_off_cnt;
620 622
621 int beacon_interval; 623 struct ath9k_hw_cal_data caldata;
624 int last_rssi;
622 625
623#ifdef CONFIG_ATH9K_DEBUGFS 626#ifdef CONFIG_ATH9K_DEBUGFS
624 struct ath9k_debug debug; 627 struct ath9k_debug debug;
628 spinlock_t nodes_lock;
629 struct list_head nodes; /* basically, stations */
630 unsigned int tx_complete_poll_work_seen;
625#endif 631#endif
626 struct ath_beacon_config cur_beacon_conf; 632 struct ath_beacon_config cur_beacon_conf;
627 struct delayed_work tx_complete_work; 633 struct delayed_work tx_complete_work;
634 struct delayed_work hw_pll_work;
628 struct ath_btcoex btcoex; 635 struct ath_btcoex btcoex;
629 636
630 struct ath_descdma txsdma; 637 struct ath_descdma txsdma;
@@ -632,23 +639,6 @@ struct ath_softc {
632 struct ath_ant_comb ant_comb; 639 struct ath_ant_comb ant_comb;
633}; 640};
634 641
635struct ath_wiphy {
636 struct ath_softc *sc; /* shared for all virtual wiphys */
637 struct ieee80211_hw *hw;
638 struct ath9k_hw_cal_data caldata;
639 enum ath_wiphy_state {
640 ATH_WIPHY_INACTIVE,
641 ATH_WIPHY_ACTIVE,
642 ATH_WIPHY_PAUSING,
643 ATH_WIPHY_PAUSED,
644 ATH_WIPHY_SCAN,
645 } state;
646 bool idle;
647 int chan_idx;
648 int chan_is_ht;
649 int last_rssi;
650};
651
652void ath9k_tasklet(unsigned long data); 642void ath9k_tasklet(unsigned long data);
653int ath_reset(struct ath_softc *sc, bool retry_tx); 643int ath_reset(struct ath_softc *sc, bool retry_tx);
654int ath_cabq_update(struct ath_softc *); 644int ath_cabq_update(struct ath_softc *);
@@ -669,14 +659,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
669 const struct ath_bus_ops *bus_ops); 659 const struct ath_bus_ops *bus_ops);
670void ath9k_deinit_device(struct ath_softc *sc); 660void ath9k_deinit_device(struct ath_softc *sc);
671void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); 661void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
672void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
673 struct ath9k_channel *ichan);
674int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 662int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
675 struct ath9k_channel *hchan); 663 struct ath9k_channel *hchan);
676 664
677void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw); 665void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
678void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); 666void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
679bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode); 667bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
668bool ath9k_uses_beacons(int type);
680 669
681#ifdef CONFIG_PCI 670#ifdef CONFIG_PCI
682int ath_pci_init(void); 671int ath_pci_init(void);
@@ -700,26 +689,12 @@ void ath9k_ps_restore(struct ath_softc *sc);
700u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate); 689u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
701 690
702void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 691void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
703int ath9k_wiphy_add(struct ath_softc *sc);
704int ath9k_wiphy_del(struct ath_wiphy *aphy);
705void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
706int ath9k_wiphy_pause(struct ath_wiphy *aphy);
707int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
708int ath9k_wiphy_select(struct ath_wiphy *aphy);
709void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int);
710void ath9k_wiphy_chan_work(struct work_struct *work);
711bool ath9k_wiphy_started(struct ath_softc *sc);
712void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
713 struct ath_wiphy *selected);
714bool ath9k_wiphy_scanning(struct ath_softc *sc);
715void ath9k_wiphy_work(struct work_struct *work);
716bool ath9k_all_wiphys_idle(struct ath_softc *sc);
717void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
718
719void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
720bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
721 692
722void ath_start_rfkill_poll(struct ath_softc *sc); 693void ath_start_rfkill_poll(struct ath_softc *sc);
723extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); 694extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
695void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
696 struct ieee80211_vif *vif,
697 struct ath9k_vif_iter_data *iter_data);
698
724 699
725#endif /* ATH9K_H */ 700#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 385ba03134ba..6d2a545fc35e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -112,8 +112,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
112 112
113static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) 113static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
114{ 114{
115 struct ath_wiphy *aphy = hw->priv; 115 struct ath_softc *sc = hw->priv;
116 struct ath_softc *sc = aphy->sc;
117 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 116 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
118 struct ath_tx_control txctl; 117 struct ath_tx_control txctl;
119 118
@@ -132,8 +131,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
132static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, 131static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
133 struct ieee80211_vif *vif) 132 struct ieee80211_vif *vif)
134{ 133{
135 struct ath_wiphy *aphy = hw->priv; 134 struct ath_softc *sc = hw->priv;
136 struct ath_softc *sc = aphy->sc;
137 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 135 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
138 struct ath_buf *bf; 136 struct ath_buf *bf;
139 struct ath_vif *avp; 137 struct ath_vif *avp;
@@ -142,13 +140,10 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
142 struct ieee80211_tx_info *info; 140 struct ieee80211_tx_info *info;
143 int cabq_depth; 141 int cabq_depth;
144 142
145 if (aphy->state != ATH_WIPHY_ACTIVE)
146 return NULL;
147
148 avp = (void *)vif->drv_priv; 143 avp = (void *)vif->drv_priv;
149 cabq = sc->beacon.cabq; 144 cabq = sc->beacon.cabq;
150 145
151 if (avp->av_bcbuf == NULL) 146 if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
152 return NULL; 147 return NULL;
153 148
154 /* Release the old beacon first */ 149 /* Release the old beacon first */
@@ -225,13 +220,13 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
225 return bf; 220 return bf;
226} 221}
227 222
228int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) 223int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
229{ 224{
230 struct ath_softc *sc = aphy->sc;
231 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 225 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
232 struct ath_vif *avp; 226 struct ath_vif *avp;
233 struct ath_buf *bf; 227 struct ath_buf *bf;
234 struct sk_buff *skb; 228 struct sk_buff *skb;
229 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
235 __le64 tstamp; 230 __le64 tstamp;
236 231
237 avp = (void *)vif->drv_priv; 232 avp = (void *)vif->drv_priv;
@@ -244,9 +239,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
244 struct ath_buf, list); 239 struct ath_buf, list);
245 list_del(&avp->av_bcbuf->list); 240 list_del(&avp->av_bcbuf->list);
246 241
247 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || 242 if (ath9k_uses_beacons(vif->type)) {
248 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
249 sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
250 int slot; 243 int slot;
251 /* 244 /*
252 * Assign the vif to a beacon xmit slot. As 245 * Assign the vif to a beacon xmit slot. As
@@ -256,6 +249,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
256 for (slot = 0; slot < ATH_BCBUF; slot++) 249 for (slot = 0; slot < ATH_BCBUF; slot++)
257 if (sc->beacon.bslot[slot] == NULL) { 250 if (sc->beacon.bslot[slot] == NULL) {
258 avp->av_bslot = slot; 251 avp->av_bslot = slot;
252 avp->is_bslot_active = false;
259 253
260 /* NB: keep looking for a double slot */ 254 /* NB: keep looking for a double slot */
261 if (slot == 0 || !sc->beacon.bslot[slot-1]) 255 if (slot == 0 || !sc->beacon.bslot[slot-1])
@@ -263,7 +257,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
263 } 257 }
264 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); 258 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
265 sc->beacon.bslot[avp->av_bslot] = vif; 259 sc->beacon.bslot[avp->av_bslot] = vif;
266 sc->beacon.bslot_aphy[avp->av_bslot] = aphy;
267 sc->nbcnvifs++; 260 sc->nbcnvifs++;
268 } 261 }
269 } 262 }
@@ -281,10 +274,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
281 274
282 /* NB: the beacon data buffer must be 32-bit aligned. */ 275 /* NB: the beacon data buffer must be 32-bit aligned. */
283 skb = ieee80211_beacon_get(sc->hw, vif); 276 skb = ieee80211_beacon_get(sc->hw, vif);
284 if (skb == NULL) { 277 if (skb == NULL)
285 ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n");
286 return -ENOMEM; 278 return -ENOMEM;
287 }
288 279
289 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 280 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
290 sc->beacon.bc_tstamp = le64_to_cpu(tstamp); 281 sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
@@ -293,7 +284,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
293 u64 tsfadjust; 284 u64 tsfadjust;
294 int intval; 285 int intval;
295 286
296 intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL; 287 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
297 288
298 /* 289 /*
299 * Calculate the TSF offset for this beacon slot, i.e., the 290 * Calculate the TSF offset for this beacon slot, i.e., the
@@ -325,6 +316,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
325 ath_err(common, "dma_mapping_error on beacon alloc\n"); 316 ath_err(common, "dma_mapping_error on beacon alloc\n");
326 return -ENOMEM; 317 return -ENOMEM;
327 } 318 }
319 avp->is_bslot_active = true;
328 320
329 return 0; 321 return 0;
330} 322}
@@ -336,7 +328,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
336 328
337 if (avp->av_bslot != -1) { 329 if (avp->av_bslot != -1) {
338 sc->beacon.bslot[avp->av_bslot] = NULL; 330 sc->beacon.bslot[avp->av_bslot] = NULL;
339 sc->beacon.bslot_aphy[avp->av_bslot] = NULL;
340 sc->nbcnvifs--; 331 sc->nbcnvifs--;
341 } 332 }
342 333
@@ -358,11 +349,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
358void ath_beacon_tasklet(unsigned long data) 349void ath_beacon_tasklet(unsigned long data)
359{ 350{
360 struct ath_softc *sc = (struct ath_softc *)data; 351 struct ath_softc *sc = (struct ath_softc *)data;
352 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
361 struct ath_hw *ah = sc->sc_ah; 353 struct ath_hw *ah = sc->sc_ah;
362 struct ath_common *common = ath9k_hw_common(ah); 354 struct ath_common *common = ath9k_hw_common(ah);
363 struct ath_buf *bf = NULL; 355 struct ath_buf *bf = NULL;
364 struct ieee80211_vif *vif; 356 struct ieee80211_vif *vif;
365 struct ath_wiphy *aphy;
366 int slot; 357 int slot;
367 u32 bfaddr, bc = 0, tsftu; 358 u32 bfaddr, bc = 0, tsftu;
368 u64 tsf; 359 u64 tsf;
@@ -382,6 +373,7 @@ void ath_beacon_tasklet(unsigned long data)
382 ath_dbg(common, ATH_DBG_BSTUCK, 373 ath_dbg(common, ATH_DBG_BSTUCK,
383 "missed %u consecutive beacons\n", 374 "missed %u consecutive beacons\n",
384 sc->beacon.bmisscnt); 375 sc->beacon.bmisscnt);
376 ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
385 ath9k_hw_bstuck_nfcal(ah); 377 ath9k_hw_bstuck_nfcal(ah);
386 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 378 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
387 ath_dbg(common, ATH_DBG_BSTUCK, 379 ath_dbg(common, ATH_DBG_BSTUCK,
@@ -406,7 +398,7 @@ void ath_beacon_tasklet(unsigned long data)
406 * on the tsf to safeguard against missing an swba. 398 * on the tsf to safeguard against missing an swba.
407 */ 399 */
408 400
409 intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL; 401 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
410 402
411 tsf = ath9k_hw_gettsf64(ah); 403 tsf = ath9k_hw_gettsf64(ah);
412 tsftu = TSF_TO_TU(tsf>>32, tsf); 404 tsftu = TSF_TO_TU(tsf>>32, tsf);
@@ -420,7 +412,6 @@ void ath_beacon_tasklet(unsigned long data)
420 */ 412 */
421 slot = ATH_BCBUF - slot - 1; 413 slot = ATH_BCBUF - slot - 1;
422 vif = sc->beacon.bslot[slot]; 414 vif = sc->beacon.bslot[slot];
423 aphy = sc->beacon.bslot_aphy[slot];
424 415
425 ath_dbg(common, ATH_DBG_BEACON, 416 ath_dbg(common, ATH_DBG_BEACON,
426 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 417 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -428,7 +419,7 @@ void ath_beacon_tasklet(unsigned long data)
428 419
429 bfaddr = 0; 420 bfaddr = 0;
430 if (vif) { 421 if (vif) {
431 bf = ath_beacon_generate(aphy->hw, vif); 422 bf = ath_beacon_generate(sc->hw, vif);
432 if (bf != NULL) { 423 if (bf != NULL) {
433 bfaddr = bf->bf_daddr; 424 bfaddr = bf->bf_daddr;
434 bc = 1; 425 bc = 1;
@@ -460,16 +451,6 @@ void ath_beacon_tasklet(unsigned long data)
460 sc->beacon.updateslot = OK; 451 sc->beacon.updateslot = OK;
461 } 452 }
462 if (bfaddr != 0) { 453 if (bfaddr != 0) {
463 /*
464 * Stop any current dma and put the new frame(s) on the queue.
465 * This should never fail since we check above that no frames
466 * are still pending on the queue.
467 */
468 if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
469 ath_err(common, "beacon queue %u did not stop?\n",
470 sc->beacon.beaconq);
471 }
472
473 /* NB: cabq traffic should already be queued and primed */ 454 /* NB: cabq traffic should already be queued and primed */
474 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr); 455 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
475 ath9k_hw_txstart(ah, sc->beacon.beaconq); 456 ath9k_hw_txstart(ah, sc->beacon.beaconq);
@@ -720,10 +701,10 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
720 iftype = sc->sc_ah->opmode; 701 iftype = sc->sc_ah->opmode;
721 } 702 }
722 703
723 cur_conf->listen_interval = 1; 704 cur_conf->listen_interval = 1;
724 cur_conf->dtim_count = 1; 705 cur_conf->dtim_count = 1;
725 cur_conf->bmiss_timeout = 706 cur_conf->bmiss_timeout =
726 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; 707 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
727 708
728 /* 709 /*
729 * It looks like mac80211 may end up using beacon interval of zero in 710 * It looks like mac80211 may end up using beacon interval of zero in
@@ -735,8 +716,9 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
735 cur_conf->beacon_interval = 100; 716 cur_conf->beacon_interval = 100;
736 717
737 /* 718 /*
738 * Some times we dont parse dtim period from mac80211, in that case 719 * We don't parse dtim period from mac80211 during the driver
739 * use a default value 720 * initialization as it breaks association with hidden-ssid
721 * AP and it causes latency in roaming
740 */ 722 */
741 if (cur_conf->dtim_period == 0) 723 if (cur_conf->dtim_period == 0)
742 cur_conf->dtim_period = 1; 724 cur_conf->dtim_period = 1;
@@ -760,3 +742,36 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
760 742
761 sc->sc_flags |= SC_OP_BEACONS; 743 sc->sc_flags |= SC_OP_BEACONS;
762} 744}
745
746void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
747{
748 struct ath_hw *ah = sc->sc_ah;
749 struct ath_vif *avp;
750 int slot;
751 bool found = false;
752
753 ath9k_ps_wakeup(sc);
754 if (status) {
755 for (slot = 0; slot < ATH_BCBUF; slot++) {
756 if (sc->beacon.bslot[slot]) {
757 avp = (void *)sc->beacon.bslot[slot]->drv_priv;
758 if (avp->is_bslot_active) {
759 found = true;
760 break;
761 }
762 }
763 }
764 if (found) {
765 /* Re-enable beaconing */
766 ah->imask |= ATH9K_INT_SWBA;
767 ath9k_hw_set_interrupts(ah, ah->imask);
768 }
769 } else {
770 /* Disable SWBA interrupt */
771 ah->imask &= ~ATH9K_INT_SWBA;
772 ath9k_hw_set_interrupts(ah, ah->imask);
773 tasklet_kill(&sc->bcon_tasklet);
774 ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
775 }
776 ath9k_ps_restore(sc);
777}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index b68a1acbddd0..8649581fa4dd 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -262,7 +262,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
262 * since 250us often results in NF load timeout and causes deaf 262 * since 250us often results in NF load timeout and causes deaf
263 * condition during stress testing 12/12/2009 263 * condition during stress testing 12/12/2009
264 */ 264 */
265 for (j = 0; j < 1000; j++) { 265 for (j = 0; j < 10000; j++) {
266 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & 266 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
267 AR_PHY_AGC_CONTROL_NF) == 0) 267 AR_PHY_AGC_CONTROL_NF) == 0)
268 break; 268 break;
@@ -278,7 +278,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
278 * here, the baseband nf cal will just be capped by our present 278 * here, the baseband nf cal will just be capped by our present
279 * noisefloor until the next calibration timer. 279 * noisefloor until the next calibration timer.
280 */ 280 */
281 if (j == 1000) { 281 if (j == 10000) {
282 ath_dbg(common, ATH_DBG_ANY, 282 ath_dbg(common, ATH_DBG_ANY,
283 "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", 283 "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
284 REG_READ(ah, AR_PHY_AGC_CONTROL)); 284 REG_READ(ah, AR_PHY_AGC_CONTROL));
@@ -382,9 +382,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
382 s16 default_nf; 382 s16 default_nf;
383 int i, j; 383 int i, j;
384 384
385 if (!ah->caldata) 385 ah->caldata->channel = chan->channel;
386 return; 386 ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
387
388 h = ah->caldata->nfCalHist; 387 h = ah->caldata->nfCalHist;
389 default_nf = ath9k_hw_get_default_nf(ah, chan); 388 default_nf = ath9k_hw_get_default_nf(ah, chan);
390 for (i = 0; i < NUM_NF_READINGS; i++) { 389 for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index df1998d48253..615e68276e72 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -189,6 +189,17 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
189} 189}
190EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp); 190EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
191 191
192void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
193 u16 new_txpow, u16 *txpower)
194{
195 if (cur_txpow != new_txpow) {
196 ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
197 /* read back in case value is clamped */
198 *txpower = ath9k_hw_regulatory(ah)->power_limit;
199 }
200}
201EXPORT_SYMBOL(ath9k_cmn_update_txpow);
202
192static int __init ath9k_cmn_init(void) 203static int __init ath9k_cmn_init(void)
193{ 204{
194 return 0; 205 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index a126bddebb0a..b2f7b5f89097 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,8 +23,6 @@
23 23
24/* Common header for Atheros 802.11n base driver cores */ 24/* Common header for Atheros 802.11n base driver cores */
25 25
26#define IEEE80211_WEP_NKID 4
27
28#define WME_NUM_TID 16 26#define WME_NUM_TID 16
29#define WME_BA_BMP_SIZE 64 27#define WME_BA_BMP_SIZE 64
30#define WME_MAX_BA WME_BA_BMP_SIZE 28#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -70,3 +68,5 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
70int ath9k_cmn_count_streams(unsigned int chainmask, int max); 68int ath9k_cmn_count_streams(unsigned int chainmask, int max);
71void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common, 69void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
72 enum ath_stomp_type stomp_type); 70 enum ath_stomp_type stomp_type);
71void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
72 u16 new_txpow, u16 *txpower);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 3586c43077a7..8df5a92a20f1 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/vmalloc.h>
18#include <asm/unaligned.h> 19#include <asm/unaligned.h>
19 20
20#include "ath9k.h" 21#include "ath9k.h"
@@ -30,6 +31,19 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file)
30 return 0; 31 return 0;
31} 32}
32 33
34static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos)
36{
37 u8 *buf = file->private_data;
38 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
39}
40
41static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file)
42{
43 vfree(file->private_data);
44 return 0;
45}
46
33#ifdef CONFIG_ATH_DEBUG 47#ifdef CONFIG_ATH_DEBUG
34 48
35static ssize_t read_file_debug(struct file *file, char __user *user_buf, 49static ssize_t read_file_debug(struct file *file, char __user *user_buf,
@@ -381,41 +395,40 @@ static const struct file_operations fops_interrupt = {
381 .llseek = default_llseek, 395 .llseek = default_llseek,
382}; 396};
383 397
384static const char * ath_wiphy_state_str(enum ath_wiphy_state state) 398static const char *channel_type_str(enum nl80211_channel_type t)
385{ 399{
386 switch (state) { 400 switch (t) {
387 case ATH_WIPHY_INACTIVE: 401 case NL80211_CHAN_NO_HT:
388 return "INACTIVE"; 402 return "no ht";
389 case ATH_WIPHY_ACTIVE: 403 case NL80211_CHAN_HT20:
390 return "ACTIVE"; 404 return "ht20";
391 case ATH_WIPHY_PAUSING: 405 case NL80211_CHAN_HT40MINUS:
392 return "PAUSING"; 406 return "ht40-";
393 case ATH_WIPHY_PAUSED: 407 case NL80211_CHAN_HT40PLUS:
394 return "PAUSED"; 408 return "ht40+";
395 case ATH_WIPHY_SCAN: 409 default:
396 return "SCAN"; 410 return "???";
397 } 411 }
398 return "?";
399} 412}
400 413
401static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, 414static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
402 size_t count, loff_t *ppos) 415 size_t count, loff_t *ppos)
403{ 416{
404 struct ath_softc *sc = file->private_data; 417 struct ath_softc *sc = file->private_data;
405 struct ath_wiphy *aphy = sc->pri_wiphy; 418 struct ieee80211_channel *chan = sc->hw->conf.channel;
406 struct ieee80211_channel *chan = aphy->hw->conf.channel; 419 struct ieee80211_conf *conf = &(sc->hw->conf);
407 char buf[512]; 420 char buf[512];
408 unsigned int len = 0; 421 unsigned int len = 0;
409 int i;
410 u8 addr[ETH_ALEN]; 422 u8 addr[ETH_ALEN];
411 u32 tmp; 423 u32 tmp;
412 424
413 len += snprintf(buf + len, sizeof(buf) - len, 425 len += snprintf(buf + len, sizeof(buf) - len,
414 "primary: %s (%s chan=%d ht=%d)\n", 426 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
415 wiphy_name(sc->pri_wiphy->hw->wiphy), 427 wiphy_name(sc->hw->wiphy),
416 ath_wiphy_state_str(sc->pri_wiphy->state),
417 ieee80211_frequency_to_channel(chan->center_freq), 428 ieee80211_frequency_to_channel(chan->center_freq),
418 aphy->chan_is_ht); 429 chan->center_freq,
430 conf->channel_type,
431 channel_type_str(conf->channel_type));
419 432
420 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr); 433 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
421 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); 434 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
@@ -457,156 +470,82 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
457 else 470 else
458 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 471 len += snprintf(buf + len, sizeof(buf) - len, "\n");
459 472
460 /* Put variable-length stuff down here, and check for overflows. */
461 for (i = 0; i < sc->num_sec_wiphy; i++) {
462 struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
463 if (aphy_tmp == NULL)
464 continue;
465 chan = aphy_tmp->hw->conf.channel;
466 len += snprintf(buf + len, sizeof(buf) - len,
467 "secondary: %s (%s chan=%d ht=%d)\n",
468 wiphy_name(aphy_tmp->hw->wiphy),
469 ath_wiphy_state_str(aphy_tmp->state),
470 ieee80211_frequency_to_channel(chan->center_freq),
471 aphy_tmp->chan_is_ht);
472 }
473 if (len > sizeof(buf)) 473 if (len > sizeof(buf))
474 len = sizeof(buf); 474 len = sizeof(buf);
475 475
476 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 476 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
477} 477}
478 478
479static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name)
480{
481 int i;
482 if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0)
483 return sc->pri_wiphy;
484 for (i = 0; i < sc->num_sec_wiphy; i++) {
485 struct ath_wiphy *aphy = sc->sec_wiphy[i];
486 if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0)
487 return aphy;
488 }
489 return NULL;
490}
491
492static int del_wiphy(struct ath_softc *sc, const char *name)
493{
494 struct ath_wiphy *aphy = get_wiphy(sc, name);
495 if (!aphy)
496 return -ENOENT;
497 return ath9k_wiphy_del(aphy);
498}
499
500static int pause_wiphy(struct ath_softc *sc, const char *name)
501{
502 struct ath_wiphy *aphy = get_wiphy(sc, name);
503 if (!aphy)
504 return -ENOENT;
505 return ath9k_wiphy_pause(aphy);
506}
507
508static int unpause_wiphy(struct ath_softc *sc, const char *name)
509{
510 struct ath_wiphy *aphy = get_wiphy(sc, name);
511 if (!aphy)
512 return -ENOENT;
513 return ath9k_wiphy_unpause(aphy);
514}
515
516static int select_wiphy(struct ath_softc *sc, const char *name)
517{
518 struct ath_wiphy *aphy = get_wiphy(sc, name);
519 if (!aphy)
520 return -ENOENT;
521 return ath9k_wiphy_select(aphy);
522}
523
524static int schedule_wiphy(struct ath_softc *sc, const char *msec)
525{
526 ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0));
527 return 0;
528}
529
530static ssize_t write_file_wiphy(struct file *file, const char __user *user_buf,
531 size_t count, loff_t *ppos)
532{
533 struct ath_softc *sc = file->private_data;
534 char buf[50];
535 size_t len;
536
537 len = min(count, sizeof(buf) - 1);
538 if (copy_from_user(buf, user_buf, len))
539 return -EFAULT;
540 buf[len] = '\0';
541 if (len > 0 && buf[len - 1] == '\n')
542 buf[len - 1] = '\0';
543
544 if (strncmp(buf, "add", 3) == 0) {
545 int res = ath9k_wiphy_add(sc);
546 if (res < 0)
547 return res;
548 } else if (strncmp(buf, "del=", 4) == 0) {
549 int res = del_wiphy(sc, buf + 4);
550 if (res < 0)
551 return res;
552 } else if (strncmp(buf, "pause=", 6) == 0) {
553 int res = pause_wiphy(sc, buf + 6);
554 if (res < 0)
555 return res;
556 } else if (strncmp(buf, "unpause=", 8) == 0) {
557 int res = unpause_wiphy(sc, buf + 8);
558 if (res < 0)
559 return res;
560 } else if (strncmp(buf, "select=", 7) == 0) {
561 int res = select_wiphy(sc, buf + 7);
562 if (res < 0)
563 return res;
564 } else if (strncmp(buf, "schedule=", 9) == 0) {
565 int res = schedule_wiphy(sc, buf + 9);
566 if (res < 0)
567 return res;
568 } else
569 return -EOPNOTSUPP;
570
571 return count;
572}
573
574static const struct file_operations fops_wiphy = { 479static const struct file_operations fops_wiphy = {
575 .read = read_file_wiphy, 480 .read = read_file_wiphy,
576 .write = write_file_wiphy,
577 .open = ath9k_debugfs_open, 481 .open = ath9k_debugfs_open,
578 .owner = THIS_MODULE, 482 .owner = THIS_MODULE,
579 .llseek = default_llseek, 483 .llseek = default_llseek,
580}; 484};
581 485
486#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
582#define PR(str, elem) \ 487#define PR(str, elem) \
583 do { \ 488 do { \
584 len += snprintf(buf + len, size - len, \ 489 len += snprintf(buf + len, size - len, \
585 "%s%13u%11u%10u%10u\n", str, \ 490 "%s%13u%11u%10u%10u\n", str, \
586 sc->debug.stats.txstats[WME_AC_BE].elem, \ 491 sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
587 sc->debug.stats.txstats[WME_AC_BK].elem, \ 492 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
588 sc->debug.stats.txstats[WME_AC_VI].elem, \ 493 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
589 sc->debug.stats.txstats[WME_AC_VO].elem); \ 494 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
495 if (len >= size) \
496 goto done; \
497} while(0)
498
499#define PRX(str, elem) \
500do { \
501 len += snprintf(buf + len, size - len, \
502 "%s%13u%11u%10u%10u\n", str, \
503 (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \
504 (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \
505 (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \
506 (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \
507 if (len >= size) \
508 goto done; \
590} while(0) 509} while(0)
591 510
511#define PRQLE(str, elem) \
512do { \
513 len += snprintf(buf + len, size - len, \
514 "%s%13i%11i%10i%10i\n", str, \
515 list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \
516 list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \
517 list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \
518 list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \
519 if (len >= size) \
520 goto done; \
521} while (0)
522
592static ssize_t read_file_xmit(struct file *file, char __user *user_buf, 523static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
593 size_t count, loff_t *ppos) 524 size_t count, loff_t *ppos)
594{ 525{
595 struct ath_softc *sc = file->private_data; 526 struct ath_softc *sc = file->private_data;
596 char *buf; 527 char *buf;
597 unsigned int len = 0, size = 2048; 528 unsigned int len = 0, size = 8000;
529 int i;
598 ssize_t retval = 0; 530 ssize_t retval = 0;
531 char tmp[32];
599 532
600 buf = kzalloc(size, GFP_KERNEL); 533 buf = kzalloc(size, GFP_KERNEL);
601 if (buf == NULL) 534 if (buf == NULL)
602 return -ENOMEM; 535 return -ENOMEM;
603 536
604 len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO"); 537 len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x"
538 " poll-work-seen: %u\n"
539 "%30s %10s%10s%10s\n\n",
540 ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
541 sc->tx_complete_poll_work_seen,
542 "BE", "BK", "VI", "VO");
605 543
606 PR("MPDUs Queued: ", queued); 544 PR("MPDUs Queued: ", queued);
607 PR("MPDUs Completed: ", completed); 545 PR("MPDUs Completed: ", completed);
608 PR("Aggregates: ", a_aggr); 546 PR("Aggregates: ", a_aggr);
609 PR("AMPDUs Queued: ", a_queued); 547 PR("AMPDUs Queued HW:", a_queued_hw);
548 PR("AMPDUs Queued SW:", a_queued_sw);
610 PR("AMPDUs Completed:", a_completed); 549 PR("AMPDUs Completed:", a_completed);
611 PR("AMPDUs Retried: ", a_retries); 550 PR("AMPDUs Retried: ", a_retries);
612 PR("AMPDUs XRetried: ", a_xretries); 551 PR("AMPDUs XRetried: ", a_xretries);
@@ -618,6 +557,223 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
618 PR("DELIM Underrun: ", delim_underrun); 557 PR("DELIM Underrun: ", delim_underrun);
619 PR("TX-Pkts-All: ", tx_pkts_all); 558 PR("TX-Pkts-All: ", tx_pkts_all);
620 PR("TX-Bytes-All: ", tx_bytes_all); 559 PR("TX-Bytes-All: ", tx_bytes_all);
560 PR("hw-put-tx-buf: ", puttxbuf);
561 PR("hw-tx-start: ", txstart);
562 PR("hw-tx-proc-desc: ", txprocdesc);
563 len += snprintf(buf + len, size - len,
564 "%s%11p%11p%10p%10p\n", "txq-memory-address:",
565 sc->tx.txq_map[WME_AC_BE],
566 sc->tx.txq_map[WME_AC_BK],
567 sc->tx.txq_map[WME_AC_VI],
568 sc->tx.txq_map[WME_AC_VO]);
569 if (len >= size)
570 goto done;
571
572 PRX("axq-qnum: ", axq_qnum);
573 PRX("axq-depth: ", axq_depth);
574 PRX("axq-ampdu_depth: ", axq_ampdu_depth);
575 PRX("axq-stopped ", stopped);
576 PRX("tx-in-progress ", axq_tx_inprogress);
577 PRX("pending-frames ", pending_frames);
578 PRX("txq_headidx: ", txq_headidx);
579 PRX("txq_tailidx: ", txq_headidx);
580
581 PRQLE("axq_q empty: ", axq_q);
582 PRQLE("axq_acq empty: ", axq_acq);
583 PRQLE("txq_fifo_pending: ", txq_fifo_pending);
584 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
585 snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
586 PRQLE(tmp, txq_fifo[i]);
587 }
588
589 /* Print out more detailed queue-info */
590 for (i = 0; i <= WME_AC_BK; i++) {
591 struct ath_txq *txq = &(sc->tx.txq[i]);
592 struct ath_atx_ac *ac;
593 struct ath_atx_tid *tid;
594 if (len >= size)
595 goto done;
596 spin_lock_bh(&txq->axq_lock);
597 if (!list_empty(&txq->axq_acq)) {
598 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
599 list);
600 len += snprintf(buf + len, size - len,
601 "txq[%i] first-ac: %p sched: %i\n",
602 i, ac, ac->sched);
603 if (list_empty(&ac->tid_q) || (len >= size))
604 goto done_for;
605 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
606 list);
607 len += snprintf(buf + len, size - len,
608 " first-tid: %p sched: %i paused: %i\n",
609 tid, tid->sched, tid->paused);
610 }
611 done_for:
612 spin_unlock_bh(&txq->axq_lock);
613 }
614
615done:
616 if (len > size)
617 len = size;
618
619 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
620 kfree(buf);
621
622 return retval;
623}
624
625static ssize_t read_file_stations(struct file *file, char __user *user_buf,
626 size_t count, loff_t *ppos)
627{
628 struct ath_softc *sc = file->private_data;
629 char *buf;
630 unsigned int len = 0, size = 64000;
631 struct ath_node *an = NULL;
632 ssize_t retval = 0;
633 int q;
634
635 buf = kzalloc(size, GFP_KERNEL);
636 if (buf == NULL)
637 return -ENOMEM;
638
639 len += snprintf(buf + len, size - len,
640 "Stations:\n"
641 " tid: addr sched paused buf_q-empty an ac\n"
642 " ac: addr sched tid_q-empty txq\n");
643
644 spin_lock(&sc->nodes_lock);
645 list_for_each_entry(an, &sc->nodes, list) {
646 len += snprintf(buf + len, size - len,
647 "%pM\n", an->sta->addr);
648 if (len >= size)
649 goto done;
650
651 for (q = 0; q < WME_NUM_TID; q++) {
652 struct ath_atx_tid *tid = &(an->tid[q]);
653 len += snprintf(buf + len, size - len,
654 " tid: %p %s %s %i %p %p\n",
655 tid, tid->sched ? "sched" : "idle",
656 tid->paused ? "paused" : "running",
657 list_empty(&tid->buf_q),
658 tid->an, tid->ac);
659 if (len >= size)
660 goto done;
661 }
662
663 for (q = 0; q < WME_NUM_AC; q++) {
664 struct ath_atx_ac *ac = &(an->ac[q]);
665 len += snprintf(buf + len, size - len,
666 " ac: %p %s %i %p\n",
667 ac, ac->sched ? "sched" : "idle",
668 list_empty(&ac->tid_q), ac->txq);
669 if (len >= size)
670 goto done;
671 }
672 }
673
674done:
675 spin_unlock(&sc->nodes_lock);
676 if (len > size)
677 len = size;
678
679 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
680 kfree(buf);
681
682 return retval;
683}
684
685static ssize_t read_file_misc(struct file *file, char __user *user_buf,
686 size_t count, loff_t *ppos)
687{
688 struct ath_softc *sc = file->private_data;
689 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
690 struct ath_hw *ah = sc->sc_ah;
691 struct ieee80211_hw *hw = sc->hw;
692 char *buf;
693 unsigned int len = 0, size = 8000;
694 ssize_t retval = 0;
695 const char *tmp;
696 unsigned int reg;
697 struct ath9k_vif_iter_data iter_data;
698
699 ath9k_calculate_iter_data(hw, NULL, &iter_data);
700
701 buf = kzalloc(size, GFP_KERNEL);
702 if (buf == NULL)
703 return -ENOMEM;
704
705 switch (sc->sc_ah->opmode) {
706 case NL80211_IFTYPE_ADHOC:
707 tmp = "ADHOC";
708 break;
709 case NL80211_IFTYPE_MESH_POINT:
710 tmp = "MESH";
711 break;
712 case NL80211_IFTYPE_AP:
713 tmp = "AP";
714 break;
715 case NL80211_IFTYPE_STATION:
716 tmp = "STATION";
717 break;
718 default:
719 tmp = "???";
720 break;
721 }
722
723 len += snprintf(buf + len, size - len,
724 "curbssid: %pM\n"
725 "OP-Mode: %s(%i)\n"
726 "Beacon-Timer-Register: 0x%x\n",
727 common->curbssid,
728 tmp, (int)(sc->sc_ah->opmode),
729 REG_READ(ah, AR_BEACON_PERIOD));
730
731 reg = REG_READ(ah, AR_TIMER_MODE);
732 len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
733 reg);
734 if (reg & AR_TBTT_TIMER_EN)
735 len += snprintf(buf + len, size - len, "TBTT ");
736 if (reg & AR_DBA_TIMER_EN)
737 len += snprintf(buf + len, size - len, "DBA ");
738 if (reg & AR_SWBA_TIMER_EN)
739 len += snprintf(buf + len, size - len, "SWBA ");
740 if (reg & AR_HCF_TIMER_EN)
741 len += snprintf(buf + len, size - len, "HCF ");
742 if (reg & AR_TIM_TIMER_EN)
743 len += snprintf(buf + len, size - len, "TIM ");
744 if (reg & AR_DTIM_TIMER_EN)
745 len += snprintf(buf + len, size - len, "DTIM ");
746 len += snprintf(buf + len, size - len, ")\n");
747
748 reg = sc->sc_ah->imask;
749 len += snprintf(buf + len, size - len, "imask: 0x%x (", reg);
750 if (reg & ATH9K_INT_SWBA)
751 len += snprintf(buf + len, size - len, "SWBA ");
752 if (reg & ATH9K_INT_BMISS)
753 len += snprintf(buf + len, size - len, "BMISS ");
754 if (reg & ATH9K_INT_CST)
755 len += snprintf(buf + len, size - len, "CST ");
756 if (reg & ATH9K_INT_RX)
757 len += snprintf(buf + len, size - len, "RX ");
758 if (reg & ATH9K_INT_RXHP)
759 len += snprintf(buf + len, size - len, "RXHP ");
760 if (reg & ATH9K_INT_RXLP)
761 len += snprintf(buf + len, size - len, "RXLP ");
762 if (reg & ATH9K_INT_BB_WATCHDOG)
763 len += snprintf(buf + len, size - len, "BB_WATCHDOG ");
764 /* there are other IRQs if one wanted to add them. */
765 len += snprintf(buf + len, size - len, ")\n");
766
767 len += snprintf(buf + len, size - len,
768 "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i"
769 " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n",
770 iter_data.naps, iter_data.nstations, iter_data.nmeshes,
771 iter_data.nwds, iter_data.nadhocs, iter_data.nothers,
772 sc->nvifs, sc->nbcnvifs);
773
774 len += snprintf(buf + len, size - len,
775 "Calculated-BSSID-Mask: %pM\n",
776 iter_data.mask);
621 777
622 if (len > size) 778 if (len > size)
623 len = size; 779 len = size;
@@ -629,9 +785,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
629} 785}
630 786
631void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, 787void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
632 struct ath_tx_status *ts) 788 struct ath_tx_status *ts, struct ath_txq *txq)
633{ 789{
634 int qnum = skb_get_queue_mapping(bf->bf_mpdu); 790 int qnum = txq->axq_qnum;
635 791
636 TX_STAT_INC(qnum, tx_pkts_all); 792 TX_STAT_INC(qnum, tx_pkts_all);
637 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; 793 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
@@ -666,6 +822,20 @@ static const struct file_operations fops_xmit = {
666 .llseek = default_llseek, 822 .llseek = default_llseek,
667}; 823};
668 824
825static const struct file_operations fops_stations = {
826 .read = read_file_stations,
827 .open = ath9k_debugfs_open,
828 .owner = THIS_MODULE,
829 .llseek = default_llseek,
830};
831
832static const struct file_operations fops_misc = {
833 .read = read_file_misc,
834 .open = ath9k_debugfs_open,
835 .owner = THIS_MODULE,
836 .llseek = default_llseek,
837};
838
669static ssize_t read_file_recv(struct file *file, char __user *user_buf, 839static ssize_t read_file_recv(struct file *file, char __user *user_buf,
670 size_t count, loff_t *ppos) 840 size_t count, loff_t *ppos)
671{ 841{
@@ -871,6 +1041,42 @@ static const struct file_operations fops_regval = {
871 .llseek = default_llseek, 1041 .llseek = default_llseek,
872}; 1042};
873 1043
1044#define REGDUMP_LINE_SIZE 20
1045
1046static int open_file_regdump(struct inode *inode, struct file *file)
1047{
1048 struct ath_softc *sc = inode->i_private;
1049 unsigned int len = 0;
1050 u8 *buf;
1051 int i;
1052 unsigned long num_regs, regdump_len, max_reg_offset;
1053
1054 max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
1055 num_regs = max_reg_offset / 4 + 1;
1056 regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
1057 buf = vmalloc(regdump_len);
1058 if (!buf)
1059 return -ENOMEM;
1060
1061 ath9k_ps_wakeup(sc);
1062 for (i = 0; i < num_regs; i++)
1063 len += scnprintf(buf + len, regdump_len - len,
1064 "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
1065 ath9k_ps_restore(sc);
1066
1067 file->private_data = buf;
1068
1069 return 0;
1070}
1071
1072static const struct file_operations fops_regdump = {
1073 .open = open_file_regdump,
1074 .read = ath9k_debugfs_read_buf,
1075 .release = ath9k_debugfs_release_buf,
1076 .owner = THIS_MODULE,
1077 .llseek = default_llseek,/* read accesses f_pos */
1078};
1079
874int ath9k_init_debug(struct ath_hw *ah) 1080int ath9k_init_debug(struct ath_hw *ah)
875{ 1081{
876 struct ath_common *common = ath9k_hw_common(ah); 1082 struct ath_common *common = ath9k_hw_common(ah);
@@ -903,6 +1109,14 @@ int ath9k_init_debug(struct ath_hw *ah)
903 sc, &fops_xmit)) 1109 sc, &fops_xmit))
904 goto err; 1110 goto err;
905 1111
1112 if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy,
1113 sc, &fops_stations))
1114 goto err;
1115
1116 if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy,
1117 sc, &fops_misc))
1118 goto err;
1119
906 if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, 1120 if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
907 sc, &fops_recv)) 1121 sc, &fops_recv))
908 goto err; 1122 goto err;
@@ -927,6 +1141,10 @@ int ath9k_init_debug(struct ath_hw *ah)
927 sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca)) 1141 sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
928 goto err; 1142 goto err;
929 1143
1144 if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy,
1145 sc, &fops_regdump))
1146 goto err;
1147
930 sc->debug.regidx = 0; 1148 sc->debug.regidx = 0;
931 return 0; 1149 return 0;
932err: 1150err:
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 1e5078bd0344..59338de0ce19 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -89,7 +89,8 @@ struct ath_interrupt_stats {
89 * @queued: Total MPDUs (non-aggr) queued 89 * @queued: Total MPDUs (non-aggr) queued
90 * @completed: Total MPDUs (non-aggr) completed 90 * @completed: Total MPDUs (non-aggr) completed
91 * @a_aggr: Total no. of aggregates queued 91 * @a_aggr: Total no. of aggregates queued
92 * @a_queued: Total AMPDUs queued 92 * @a_queued_hw: Total AMPDUs queued to hardware
93 * @a_queued_sw: Total AMPDUs queued to software queues
93 * @a_completed: Total AMPDUs completed 94 * @a_completed: Total AMPDUs completed
94 * @a_retries: No. of AMPDUs retried (SW) 95 * @a_retries: No. of AMPDUs retried (SW)
95 * @a_xretries: No. of AMPDUs dropped due to xretries 96 * @a_xretries: No. of AMPDUs dropped due to xretries
@@ -102,6 +103,9 @@ struct ath_interrupt_stats {
102 * @desc_cfg_err: Descriptor configuration errors 103 * @desc_cfg_err: Descriptor configuration errors
103 * @data_urn: TX data underrun errors 104 * @data_urn: TX data underrun errors
104 * @delim_urn: TX delimiter underrun errors 105 * @delim_urn: TX delimiter underrun errors
106 * @puttxbuf: Number of times hardware was given txbuf to write.
107 * @txstart: Number of times hardware was told to start tx.
108 * @txprocdesc: Number of times tx descriptor was processed
105 */ 109 */
106struct ath_tx_stats { 110struct ath_tx_stats {
107 u32 tx_pkts_all; 111 u32 tx_pkts_all;
@@ -109,7 +113,8 @@ struct ath_tx_stats {
109 u32 queued; 113 u32 queued;
110 u32 completed; 114 u32 completed;
111 u32 a_aggr; 115 u32 a_aggr;
112 u32 a_queued; 116 u32 a_queued_hw;
117 u32 a_queued_sw;
113 u32 a_completed; 118 u32 a_completed;
114 u32 a_retries; 119 u32 a_retries;
115 u32 a_xretries; 120 u32 a_xretries;
@@ -119,6 +124,9 @@ struct ath_tx_stats {
119 u32 desc_cfg_err; 124 u32 desc_cfg_err;
120 u32 data_underrun; 125 u32 data_underrun;
121 u32 delim_underrun; 126 u32 delim_underrun;
127 u32 puttxbuf;
128 u32 txstart;
129 u32 txprocdesc;
122}; 130};
123 131
124/** 132/**
@@ -167,7 +175,7 @@ int ath9k_init_debug(struct ath_hw *ah);
167 175
168void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 176void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
169void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, 177void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
170 struct ath_tx_status *ts); 178 struct ath_tx_status *ts, struct ath_txq *txq);
171void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs); 179void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
172 180
173#else 181#else
@@ -184,7 +192,8 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
184 192
185static inline void ath_debug_stat_tx(struct ath_softc *sc, 193static inline void ath_debug_stat_tx(struct ath_softc *sc,
186 struct ath_buf *bf, 194 struct ath_buf *bf,
187 struct ath_tx_status *ts) 195 struct ath_tx_status *ts,
196 struct ath_txq *txq)
188{ 197{
189} 198}
190 199
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index d05163159572..8c18bed3a558 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -89,6 +89,38 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
89 return false; 89 return false;
90} 90}
91 91
92void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
93 int eep_start_loc, int size)
94{
95 int i = 0, j, addr;
96 u32 addrdata[8];
97 u32 data[8];
98
99 for (addr = 0; addr < size; addr++) {
100 addrdata[i] = AR5416_EEPROM_OFFSET +
101 ((addr + eep_start_loc) << AR5416_EEPROM_S);
102 i++;
103 if (i == 8) {
104 REG_READ_MULTI(ah, addrdata, data, i);
105
106 for (j = 0; j < i; j++) {
107 *eep_data = data[j];
108 eep_data++;
109 }
110 i = 0;
111 }
112 }
113
114 if (i != 0) {
115 REG_READ_MULTI(ah, addrdata, data, i);
116
117 for (j = 0; j < i; j++) {
118 *eep_data = data[j];
119 eep_data++;
120 }
121 }
122}
123
92bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data) 124bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
93{ 125{
94 return common->bus_ops->eeprom_read(common, off, data); 126 return common->bus_ops->eeprom_read(common, off, data);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 58e2ddc927a9..bd82447f5b78 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -665,6 +665,8 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
665bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, 665bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
666 u16 *indexL, u16 *indexR); 666 u16 *indexL, u16 *indexR);
667bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data); 667bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
668void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
669 int eep_start_loc, int size);
668void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, 670void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
669 u8 *pVpdList, u16 numIntercepts, 671 u8 *pVpdList, u16 numIntercepts,
670 u8 *pRetVpdList); 672 u8 *pRetVpdList);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index fbdff7e47952..bc77a308c901 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -27,19 +27,13 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
27 return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); 27 return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
28} 28}
29 29
30static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
31{
32#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 30#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
31
32static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
33{
33 struct ath_common *common = ath9k_hw_common(ah); 34 struct ath_common *common = ath9k_hw_common(ah);
34 u16 *eep_data = (u16 *)&ah->eeprom.map4k; 35 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
35 int addr, eep_start_loc = 0; 36 int addr, eep_start_loc = 64;
36
37 eep_start_loc = 64;
38
39 if (!ath9k_hw_use_flash(ah)) {
40 ath_dbg(common, ATH_DBG_EEPROM,
41 "Reading from EEPROM, not flash\n");
42 }
43 37
44 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 38 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
45 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { 39 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
@@ -51,9 +45,34 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
51 } 45 }
52 46
53 return true; 47 return true;
54#undef SIZE_EEPROM_4K
55} 48}
56 49
50static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah)
51{
52 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
53
54 ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K);
55
56 return true;
57}
58
59static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
60{
61 struct ath_common *common = ath9k_hw_common(ah);
62
63 if (!ath9k_hw_use_flash(ah)) {
64 ath_dbg(common, ATH_DBG_EEPROM,
65 "Reading from EEPROM, not flash\n");
66 }
67
68 if (common->bus_ops->ath_bus_type == ATH_USB)
69 return __ath9k_hw_usb_4k_fill_eeprom(ah);
70 else
71 return __ath9k_hw_4k_fill_eeprom(ah);
72}
73
74#undef SIZE_EEPROM_4K
75
57static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) 76static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
58{ 77{
59#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 78#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 9b6bc8a953bc..8cd8333cc086 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -17,7 +17,7 @@
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h" 18#include "ar9002_phy.h"
19 19
20#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16)) 20#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16))
21 21
22static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) 22static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
23{ 23{
@@ -29,25 +29,15 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
29 return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; 29 return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
30} 30}
31 31
32static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) 32static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
33{ 33{
34 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 34 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
35 struct ath_common *common = ath9k_hw_common(ah); 35 struct ath_common *common = ath9k_hw_common(ah);
36 u16 *eep_data; 36 u16 *eep_data;
37 int addr, eep_start_loc; 37 int addr, eep_start_loc = AR9287_EEP_START_LOC;
38 eep_data = (u16 *)eep; 38 eep_data = (u16 *)eep;
39 39
40 if (common->bus_ops->ath_bus_type == ATH_USB) 40 for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
41 eep_start_loc = AR9287_HTC_EEP_START_LOC;
42 else
43 eep_start_loc = AR9287_EEP_START_LOC;
44
45 if (!ath9k_hw_use_flash(ah)) {
46 ath_dbg(common, ATH_DBG_EEPROM,
47 "Reading from EEPROM, not flash\n");
48 }
49
50 for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
51 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, 41 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
52 eep_data)) { 42 eep_data)) {
53 ath_dbg(common, ATH_DBG_EEPROM, 43 ath_dbg(common, ATH_DBG_EEPROM,
@@ -60,6 +50,31 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
60 return true; 50 return true;
61} 51}
62 52
53static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah)
54{
55 u16 *eep_data = (u16 *)&ah->eeprom.map9287;
56
57 ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
58 AR9287_HTC_EEP_START_LOC,
59 SIZE_EEPROM_AR9287);
60 return true;
61}
62
63static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
64{
65 struct ath_common *common = ath9k_hw_common(ah);
66
67 if (!ath9k_hw_use_flash(ah)) {
68 ath_dbg(common, ATH_DBG_EEPROM,
69 "Reading from EEPROM, not flash\n");
70 }
71
72 if (common->bus_ops->ath_bus_type == ATH_USB)
73 return __ath9k_hw_usb_ar9287_fill_eeprom(ah);
74 else
75 return __ath9k_hw_ar9287_fill_eeprom(ah);
76}
77
63static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) 78static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
64{ 79{
65 u32 sum = 0, el, integer; 80 u32 sum = 0, el, integer;
@@ -86,7 +101,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
86 need_swap = true; 101 need_swap = true;
87 eepdata = (u16 *)(&ah->eeprom); 102 eepdata = (u16 *)(&ah->eeprom);
88 103
89 for (addr = 0; addr < NUM_EEP_WORDS; addr++) { 104 for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
90 temp = swab16(*eepdata); 105 temp = swab16(*eepdata);
91 *eepdata = temp; 106 *eepdata = temp;
92 eepdata++; 107 eepdata++;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 749a93608664..fccd87df7300 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -86,9 +86,10 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
86 return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF); 86 return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
87} 87}
88 88
89static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
90{
91#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) 89#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
90
91static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
92{
92 struct ath_common *common = ath9k_hw_common(ah); 93 struct ath_common *common = ath9k_hw_common(ah);
93 u16 *eep_data = (u16 *)&ah->eeprom.def; 94 u16 *eep_data = (u16 *)&ah->eeprom.def;
94 int addr, ar5416_eep_start_loc = 0x100; 95 int addr, ar5416_eep_start_loc = 0x100;
@@ -103,9 +104,34 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
103 eep_data++; 104 eep_data++;
104 } 105 }
105 return true; 106 return true;
106#undef SIZE_EEPROM_DEF
107} 107}
108 108
109static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah)
110{
111 u16 *eep_data = (u16 *)&ah->eeprom.def;
112
113 ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
114 0x100, SIZE_EEPROM_DEF);
115 return true;
116}
117
118static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
119{
120 struct ath_common *common = ath9k_hw_common(ah);
121
122 if (!ath9k_hw_use_flash(ah)) {
123 ath_dbg(common, ATH_DBG_EEPROM,
124 "Reading from EEPROM, not flash\n");
125 }
126
127 if (common->bus_ops->ath_bus_type == ATH_USB)
128 return __ath9k_hw_usb_def_fill_eeprom(ah);
129 else
130 return __ath9k_hw_def_fill_eeprom(ah);
131}
132
133#undef SIZE_EEPROM_DEF
134
109static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) 135static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
110{ 136{
111 struct ar5416_eeprom_def *eep = 137 struct ar5416_eeprom_def *eep =
@@ -221,9 +247,9 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
221 } 247 }
222 248
223 /* Enable fixup for AR_AN_TOP2 if necessary */ 249 /* Enable fixup for AR_AN_TOP2 if necessary */
224 if (AR_SREV_9280_20_OR_LATER(ah) && 250 if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
225 (eep->baseEepHeader.version & 0xff) > 0x0a && 251 ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
226 eep->baseEepHeader.pwdclkind == 0) 252 (eep->baseEepHeader.pwdclkind == 0))
227 ah->need_an_top2_fixup = 1; 253 ah->need_an_top2_fixup = 1;
228 254
229 if ((common->bus_ops->ath_bus_type == ATH_USB) && 255 if ((common->bus_ops->ath_bus_type == ATH_USB) &&
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 133764069246..0fb8f8ac275a 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -20,121 +20,31 @@
20/* LED functions */ 20/* LED functions */
21/********************************/ 21/********************************/
22 22
23static void ath_led_blink_work(struct work_struct *work) 23#ifdef CONFIG_MAC80211_LEDS
24{
25 struct ath_softc *sc = container_of(work, struct ath_softc,
26 ath_led_blink_work.work);
27
28 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
29 return;
30
31 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
32 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
33 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
34 else
35 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
36 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
37
38 ieee80211_queue_delayed_work(sc->hw,
39 &sc->ath_led_blink_work,
40 (sc->sc_flags & SC_OP_LED_ON) ?
41 msecs_to_jiffies(sc->led_off_duration) :
42 msecs_to_jiffies(sc->led_on_duration));
43
44 sc->led_on_duration = sc->led_on_cnt ?
45 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
46 ATH_LED_ON_DURATION_IDLE;
47 sc->led_off_duration = sc->led_off_cnt ?
48 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
49 ATH_LED_OFF_DURATION_IDLE;
50 sc->led_on_cnt = sc->led_off_cnt = 0;
51 if (sc->sc_flags & SC_OP_LED_ON)
52 sc->sc_flags &= ~SC_OP_LED_ON;
53 else
54 sc->sc_flags |= SC_OP_LED_ON;
55}
56
57static void ath_led_brightness(struct led_classdev *led_cdev, 24static void ath_led_brightness(struct led_classdev *led_cdev,
58 enum led_brightness brightness) 25 enum led_brightness brightness)
59{ 26{
60 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev); 27 struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
61 struct ath_softc *sc = led->sc; 28 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF));
62
63 switch (brightness) {
64 case LED_OFF:
65 if (led->led_type == ATH_LED_ASSOC ||
66 led->led_type == ATH_LED_RADIO) {
67 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
68 (led->led_type == ATH_LED_RADIO));
69 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
70 if (led->led_type == ATH_LED_RADIO)
71 sc->sc_flags &= ~SC_OP_LED_ON;
72 } else {
73 sc->led_off_cnt++;
74 }
75 break;
76 case LED_FULL:
77 if (led->led_type == ATH_LED_ASSOC) {
78 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
79 if (led_blink)
80 ieee80211_queue_delayed_work(sc->hw,
81 &sc->ath_led_blink_work, 0);
82 } else if (led->led_type == ATH_LED_RADIO) {
83 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
84 sc->sc_flags |= SC_OP_LED_ON;
85 } else {
86 sc->led_on_cnt++;
87 }
88 break;
89 default:
90 break;
91 }
92}
93
94static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
95 char *trigger)
96{
97 int ret;
98
99 led->sc = sc;
100 led->led_cdev.name = led->name;
101 led->led_cdev.default_trigger = trigger;
102 led->led_cdev.brightness_set = ath_led_brightness;
103
104 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
105 if (ret)
106 ath_err(ath9k_hw_common(sc->sc_ah),
107 "Failed to register led:%s", led->name);
108 else
109 led->registered = 1;
110 return ret;
111}
112
113static void ath_unregister_led(struct ath_led *led)
114{
115 if (led->registered) {
116 led_classdev_unregister(&led->led_cdev);
117 led->registered = 0;
118 }
119} 29}
120 30
121void ath_deinit_leds(struct ath_softc *sc) 31void ath_deinit_leds(struct ath_softc *sc)
122{ 32{
123 ath_unregister_led(&sc->assoc_led); 33 if (!sc->led_registered)
124 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; 34 return;
125 ath_unregister_led(&sc->tx_led); 35
126 ath_unregister_led(&sc->rx_led); 36 ath_led_brightness(&sc->led_cdev, LED_OFF);
127 ath_unregister_led(&sc->radio_led); 37 led_classdev_unregister(&sc->led_cdev);
128 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
129} 38}
130 39
131void ath_init_leds(struct ath_softc *sc) 40void ath_init_leds(struct ath_softc *sc)
132{ 41{
133 char *trigger;
134 int ret; 42 int ret;
135 43
136 if (AR_SREV_9287(sc->sc_ah)) 44 if (AR_SREV_9287(sc->sc_ah))
137 sc->sc_ah->led_pin = ATH_LED_PIN_9287; 45 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
46 else if (AR_SREV_9485(sc->sc_ah))
47 sc->sc_ah->led_pin = ATH_LED_PIN_9485;
138 else 48 else
139 sc->sc_ah->led_pin = ATH_LED_PIN_DEF; 49 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
140 50
@@ -144,48 +54,22 @@ void ath_init_leds(struct ath_softc *sc)
144 /* LED off, active low */ 54 /* LED off, active low */
145 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 55 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
146 56
147 if (led_blink) 57 if (!led_blink)
148 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work); 58 sc->led_cdev.default_trigger =
149 59 ieee80211_get_radio_led_name(sc->hw);
150 trigger = ieee80211_get_radio_led_name(sc->hw); 60
151 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name), 61 snprintf(sc->led_name, sizeof(sc->led_name),
152 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy)); 62 "ath9k-%s", wiphy_name(sc->hw->wiphy));
153 ret = ath_register_led(sc, &sc->radio_led, trigger); 63 sc->led_cdev.name = sc->led_name;
154 sc->radio_led.led_type = ATH_LED_RADIO; 64 sc->led_cdev.brightness_set = ath_led_brightness;
155 if (ret) 65
156 goto fail; 66 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &sc->led_cdev);
157 67 if (ret < 0)
158 trigger = ieee80211_get_assoc_led_name(sc->hw); 68 return;
159 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name), 69
160 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy)); 70 sc->led_registered = true;
161 ret = ath_register_led(sc, &sc->assoc_led, trigger);
162 sc->assoc_led.led_type = ATH_LED_ASSOC;
163 if (ret)
164 goto fail;
165
166 trigger = ieee80211_get_tx_led_name(sc->hw);
167 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
168 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
169 ret = ath_register_led(sc, &sc->tx_led, trigger);
170 sc->tx_led.led_type = ATH_LED_TX;
171 if (ret)
172 goto fail;
173
174 trigger = ieee80211_get_rx_led_name(sc->hw);
175 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
176 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
177 ret = ath_register_led(sc, &sc->rx_led, trigger);
178 sc->rx_led.led_type = ATH_LED_RX;
179 if (ret)
180 goto fail;
181
182 return;
183
184fail:
185 if (led_blink)
186 cancel_delayed_work_sync(&sc->ath_led_blink_work);
187 ath_deinit_leds(sc);
188} 71}
72#endif
189 73
190/*******************/ 74/*******************/
191/* Rfkill */ 75/* Rfkill */
@@ -201,8 +85,7 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
201 85
202void ath9k_rfkill_poll_state(struct ieee80211_hw *hw) 86void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
203{ 87{
204 struct ath_wiphy *aphy = hw->priv; 88 struct ath_softc *sc = hw->priv;
205 struct ath_softc *sc = aphy->sc;
206 bool blocked = !!ath_is_rfkill_set(sc); 89 bool blocked = !!ath_is_rfkill_set(sc);
207 90
208 wiphy_rfkill_set_hw_state(hw->wiphy, blocked); 91 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 07b1633b7f3f..f1b8af64569c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -52,6 +52,9 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
52 { USB_DEVICE(0x083A, 0xA704), 52 { USB_DEVICE(0x083A, 0xA704),
53 .driver_info = AR9280_USB }, /* SMC Networks */ 53 .driver_info = AR9280_USB }, /* SMC Networks */
54 54
55 { USB_DEVICE(0x0cf3, 0x20ff),
56 .driver_info = STORAGE_DEVICE },
57
55 { }, 58 { },
56}; 59};
57 60
@@ -914,13 +917,11 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
914 if (ret) { 917 if (ret) {
915 dev_err(&hif_dev->udev->dev, 918 dev_err(&hif_dev->udev->dev,
916 "ath9k_htc: Unable to allocate URBs\n"); 919 "ath9k_htc: Unable to allocate URBs\n");
917 goto err_urb; 920 goto err_fw_download;
918 } 921 }
919 922
920 return 0; 923 return 0;
921 924
922err_urb:
923 ath9k_hif_usb_dealloc_urbs(hif_dev);
924err_fw_download: 925err_fw_download:
925 release_firmware(hif_dev->firmware); 926 release_firmware(hif_dev->firmware);
926err_fw_req: 927err_fw_req:
@@ -935,6 +936,61 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
935 release_firmware(hif_dev->firmware); 936 release_firmware(hif_dev->firmware);
936} 937}
937 938
939/*
940 * An exact copy of the function from zd1211rw.
941 */
942static int send_eject_command(struct usb_interface *interface)
943{
944 struct usb_device *udev = interface_to_usbdev(interface);
945 struct usb_host_interface *iface_desc = &interface->altsetting[0];
946 struct usb_endpoint_descriptor *endpoint;
947 unsigned char *cmd;
948 u8 bulk_out_ep;
949 int r;
950
951 /* Find bulk out endpoint */
952 for (r = 1; r >= 0; r--) {
953 endpoint = &iface_desc->endpoint[r].desc;
954 if (usb_endpoint_dir_out(endpoint) &&
955 usb_endpoint_xfer_bulk(endpoint)) {
956 bulk_out_ep = endpoint->bEndpointAddress;
957 break;
958 }
959 }
960 if (r == -1) {
961 dev_err(&udev->dev,
962 "ath9k_htc: Could not find bulk out endpoint\n");
963 return -ENODEV;
964 }
965
966 cmd = kzalloc(31, GFP_KERNEL);
967 if (cmd == NULL)
968 return -ENODEV;
969
970 /* USB bulk command block */
971 cmd[0] = 0x55; /* bulk command signature */
972 cmd[1] = 0x53; /* bulk command signature */
973 cmd[2] = 0x42; /* bulk command signature */
974 cmd[3] = 0x43; /* bulk command signature */
975 cmd[14] = 6; /* command length */
976
977 cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
978 cmd[19] = 0x2; /* eject disc */
979
980 dev_info(&udev->dev, "Ejecting storage device...\n");
981 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
982 cmd, 31, NULL, 2000);
983 kfree(cmd);
984 if (r)
985 return r;
986
987 /* At this point, the device disconnects and reconnects with the real
988 * ID numbers. */
989
990 usb_set_intfdata(interface, NULL);
991 return 0;
992}
993
938static int ath9k_hif_usb_probe(struct usb_interface *interface, 994static int ath9k_hif_usb_probe(struct usb_interface *interface,
939 const struct usb_device_id *id) 995 const struct usb_device_id *id)
940{ 996{
@@ -942,6 +998,9 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
942 struct hif_device_usb *hif_dev; 998 struct hif_device_usb *hif_dev;
943 int ret = 0; 999 int ret = 0;
944 1000
1001 if (id->driver_info == STORAGE_DEVICE)
1002 return send_eject_command(interface);
1003
945 hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL); 1004 hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
946 if (!hif_dev) { 1005 if (!hif_dev) {
947 ret = -ENOMEM; 1006 ret = -ENOMEM;
@@ -1028,12 +1087,13 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
1028 struct hif_device_usb *hif_dev = usb_get_intfdata(interface); 1087 struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
1029 bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false; 1088 bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false;
1030 1089
1031 if (hif_dev) { 1090 if (!hif_dev)
1032 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); 1091 return;
1033 ath9k_htc_hw_free(hif_dev->htc_handle); 1092
1034 ath9k_hif_usb_dev_deinit(hif_dev); 1093 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
1035 usb_set_intfdata(interface, NULL); 1094 ath9k_htc_hw_free(hif_dev->htc_handle);
1036 } 1095 ath9k_hif_usb_dev_deinit(hif_dev);
1096 usb_set_intfdata(interface, NULL);
1037 1097
1038 if (!unplugged && (hif_dev->flags & HIF_USB_START)) 1098 if (!unplugged && (hif_dev->flags & HIF_USB_START))
1039 ath9k_hif_usb_reboot(udev); 1099 ath9k_hif_usb_reboot(udev);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 780ac5eac501..753a245c5ad1 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -32,6 +32,7 @@
32#include "wmi.h" 32#include "wmi.h"
33 33
34#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ 34#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
35#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
35#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */ 36#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
36#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 37#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
37#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 38#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
@@ -204,8 +205,50 @@ struct ath9k_htc_target_stats {
204 __be32 ht_tx_xretries; 205 __be32 ht_tx_xretries;
205} __packed; 206} __packed;
206 207
208#define ATH9K_HTC_MAX_VIF 2
209#define ATH9K_HTC_MAX_BCN_VIF 2
210
211#define INC_VIF(_priv, _type) do { \
212 switch (_type) { \
213 case NL80211_IFTYPE_STATION: \
214 _priv->num_sta_vif++; \
215 break; \
216 case NL80211_IFTYPE_ADHOC: \
217 _priv->num_ibss_vif++; \
218 break; \
219 case NL80211_IFTYPE_AP: \
220 _priv->num_ap_vif++; \
221 break; \
222 default: \
223 break; \
224 } \
225 } while (0)
226
227#define DEC_VIF(_priv, _type) do { \
228 switch (_type) { \
229 case NL80211_IFTYPE_STATION: \
230 _priv->num_sta_vif--; \
231 break; \
232 case NL80211_IFTYPE_ADHOC: \
233 _priv->num_ibss_vif--; \
234 break; \
235 case NL80211_IFTYPE_AP: \
236 _priv->num_ap_vif--; \
237 break; \
238 default: \
239 break; \
240 } \
241 } while (0)
242
207struct ath9k_htc_vif { 243struct ath9k_htc_vif {
208 u8 index; 244 u8 index;
245 u16 seq_no;
246 bool beacon_configured;
247};
248
249struct ath9k_vif_iter_data {
250 const u8 *hw_macaddr;
251 u8 mask[ETH_ALEN];
209}; 252};
210 253
211#define ATH9K_HTC_MAX_STA 8 254#define ATH9K_HTC_MAX_STA 8
@@ -310,10 +353,8 @@ struct ath_led {
310 353
311struct htc_beacon_config { 354struct htc_beacon_config {
312 u16 beacon_interval; 355 u16 beacon_interval;
313 u16 listen_interval;
314 u16 dtim_period; 356 u16 dtim_period;
315 u16 bmiss_timeout; 357 u16 bmiss_timeout;
316 u8 dtim_count;
317}; 358};
318 359
319struct ath_btcoex { 360struct ath_btcoex {
@@ -333,13 +374,12 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
333#define OP_SCANNING BIT(1) 374#define OP_SCANNING BIT(1)
334#define OP_LED_ASSOCIATED BIT(2) 375#define OP_LED_ASSOCIATED BIT(2)
335#define OP_LED_ON BIT(3) 376#define OP_LED_ON BIT(3)
336#define OP_PREAMBLE_SHORT BIT(4) 377#define OP_ENABLE_BEACON BIT(4)
337#define OP_PROTECT_ENABLE BIT(5) 378#define OP_LED_DEINIT BIT(5)
338#define OP_ASSOCIATED BIT(6) 379#define OP_BT_PRIORITY_DETECTED BIT(6)
339#define OP_ENABLE_BEACON BIT(7) 380#define OP_BT_SCAN BIT(7)
340#define OP_LED_DEINIT BIT(8) 381#define OP_ANI_RUNNING BIT(8)
341#define OP_BT_PRIORITY_DETECTED BIT(9) 382#define OP_TSF_RESET BIT(9)
342#define OP_BT_SCAN BIT(10)
343 383
344struct ath9k_htc_priv { 384struct ath9k_htc_priv {
345 struct device *dev; 385 struct device *dev;
@@ -358,15 +398,24 @@ struct ath9k_htc_priv {
358 enum htc_endpoint_id data_vi_ep; 398 enum htc_endpoint_id data_vi_ep;
359 enum htc_endpoint_id data_vo_ep; 399 enum htc_endpoint_id data_vo_ep;
360 400
401 u8 vif_slot;
402 u8 mon_vif_idx;
403 u8 sta_slot;
404 u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
405 u8 num_ibss_vif;
406 u8 num_sta_vif;
407 u8 num_ap_vif;
408
361 u16 op_flags; 409 u16 op_flags;
362 u16 curtxpow; 410 u16 curtxpow;
363 u16 txpowlimit; 411 u16 txpowlimit;
364 u16 nvifs; 412 u16 nvifs;
365 u16 nstations; 413 u16 nstations;
366 u16 seq_no;
367 u32 bmiss_cnt; 414 u32 bmiss_cnt;
415 bool rearm_ani;
416 bool reconfig_beacon;
368 417
369 struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS]; 418 struct ath9k_hw_cal_data caldata;
370 419
371 spinlock_t beacon_lock; 420 spinlock_t beacon_lock;
372 421
@@ -382,7 +431,7 @@ struct ath9k_htc_priv {
382 struct ath9k_htc_rx rx; 431 struct ath9k_htc_rx rx;
383 struct tasklet_struct tx_tasklet; 432 struct tasklet_struct tx_tasklet;
384 struct sk_buff_head tx_queue; 433 struct sk_buff_head tx_queue;
385 struct delayed_work ath9k_ani_work; 434 struct delayed_work ani_work;
386 struct work_struct ps_work; 435 struct work_struct ps_work;
387 struct work_struct fatal_work; 436 struct work_struct fatal_work;
388 437
@@ -424,6 +473,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv);
424void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv); 473void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
425void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, 474void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
426 struct ieee80211_vif *vif); 475 struct ieee80211_vif *vif);
476void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv);
427void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending); 477void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
428 478
429void ath9k_htc_rxep(void *priv, struct sk_buff *skb, 479void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
@@ -436,8 +486,9 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
436int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv); 486int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
437void ath9k_htc_station_work(struct work_struct *work); 487void ath9k_htc_station_work(struct work_struct *work);
438void ath9k_htc_aggr_work(struct work_struct *work); 488void ath9k_htc_aggr_work(struct work_struct *work);
439void ath9k_ani_work(struct work_struct *work);; 489void ath9k_htc_ani_work(struct work_struct *work);
440void ath_start_ani(struct ath9k_htc_priv *priv); 490void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
491void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
441 492
442int ath9k_tx_init(struct ath9k_htc_priv *priv); 493int ath9k_tx_init(struct ath9k_htc_priv *priv);
443void ath9k_tx_tasklet(unsigned long data); 494void ath9k_tx_tasklet(unsigned long data);
@@ -460,7 +511,6 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
460void ath9k_ps_work(struct work_struct *work); 511void ath9k_ps_work(struct work_struct *work);
461bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, 512bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
462 enum ath9k_power_mode mode); 513 enum ath9k_power_mode mode);
463void ath_update_txpow(struct ath9k_htc_priv *priv);
464 514
465void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); 515void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
466void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw); 516void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 87cc65a78a3f..8d1d8792436d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -123,8 +123,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
123 /* TSF out of range threshold fixed at 1 second */ 123 /* TSF out of range threshold fixed at 1 second */
124 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; 124 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
125 125
126 ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); 126 ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
127 ath_dbg(common, ATH_DBG_BEACON, 127 intval, tsf, tsftu);
128 ath_dbg(common, ATH_DBG_CONFIG,
128 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", 129 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
129 bs.bs_bmissthreshold, bs.bs_sleepduration, 130 bs.bs_bmissthreshold, bs.bs_sleepduration,
130 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); 131 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -138,25 +139,81 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
138 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask); 139 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
139} 140}
140 141
142static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
143 struct htc_beacon_config *bss_conf)
144{
145 struct ath_common *common = ath9k_hw_common(priv->ah);
146 enum ath9k_int imask = 0;
147 u32 nexttbtt, intval, tsftu;
148 __be32 htc_imask = 0;
149 int ret;
150 u8 cmd_rsp;
151 u64 tsf;
152
153 intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
154 intval /= ATH9K_HTC_MAX_BCN_VIF;
155 nexttbtt = intval;
156
157 if (priv->op_flags & OP_TSF_RESET) {
158 intval |= ATH9K_BEACON_RESET_TSF;
159 priv->op_flags &= ~OP_TSF_RESET;
160 } else {
161 /*
162 * Pull nexttbtt forward to reflect the current TSF.
163 */
164 tsf = ath9k_hw_gettsf64(priv->ah);
165 tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
166 do {
167 nexttbtt += intval;
168 } while (nexttbtt < tsftu);
169 }
170
171 intval |= ATH9K_BEACON_ENA;
172
173 if (priv->op_flags & OP_ENABLE_BEACON)
174 imask |= ATH9K_INT_SWBA;
175
176 ath_dbg(common, ATH_DBG_CONFIG,
177 "AP Beacon config, intval: %d, nexttbtt: %u imask: 0x%x\n",
178 bss_conf->beacon_interval, nexttbtt, imask);
179
180 WMI_CMD(WMI_DISABLE_INTR_CMDID);
181 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
182 priv->bmiss_cnt = 0;
183 htc_imask = cpu_to_be32(imask);
184 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
185}
186
141static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv, 187static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
142 struct htc_beacon_config *bss_conf) 188 struct htc_beacon_config *bss_conf)
143{ 189{
144 struct ath_common *common = ath9k_hw_common(priv->ah); 190 struct ath_common *common = ath9k_hw_common(priv->ah);
145 enum ath9k_int imask = 0; 191 enum ath9k_int imask = 0;
146 u32 nexttbtt, intval; 192 u32 nexttbtt, intval, tsftu;
147 __be32 htc_imask = 0; 193 __be32 htc_imask = 0;
148 int ret; 194 int ret;
149 u8 cmd_rsp; 195 u8 cmd_rsp;
196 u64 tsf;
150 197
151 intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD; 198 intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
152 nexttbtt = intval; 199 nexttbtt = intval;
200
201 /*
202 * Pull nexttbtt forward to reflect the current TSF.
203 */
204 tsf = ath9k_hw_gettsf64(priv->ah);
205 tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
206 do {
207 nexttbtt += intval;
208 } while (nexttbtt < tsftu);
209
153 intval |= ATH9K_BEACON_ENA; 210 intval |= ATH9K_BEACON_ENA;
154 if (priv->op_flags & OP_ENABLE_BEACON) 211 if (priv->op_flags & OP_ENABLE_BEACON)
155 imask |= ATH9K_INT_SWBA; 212 imask |= ATH9K_INT_SWBA;
156 213
157 ath_dbg(common, ATH_DBG_BEACON, 214 ath_dbg(common, ATH_DBG_CONFIG,
158 "IBSS Beacon config, intval: %d, imask: 0x%x\n", 215 "IBSS Beacon config, intval: %d, nexttbtt: %u, imask: 0x%x\n",
159 bss_conf->beacon_interval, imask); 216 bss_conf->beacon_interval, nexttbtt, imask);
160 217
161 WMI_CMD(WMI_DISABLE_INTR_CMDID); 218 WMI_CMD(WMI_DISABLE_INTR_CMDID);
162 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval); 219 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
@@ -207,9 +264,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
207 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 264 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
208 struct ieee80211_hdr *hdr = 265 struct ieee80211_hdr *hdr =
209 (struct ieee80211_hdr *) beacon->data; 266 (struct ieee80211_hdr *) beacon->data;
210 priv->seq_no += 0x10; 267 avp->seq_no += 0x10;
211 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 268 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
212 hdr->seq_ctrl |= cpu_to_le16(priv->seq_no); 269 hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
213 } 270 }
214 271
215 tx_ctl.type = ATH9K_HTC_NORMAL; 272 tx_ctl.type = ATH9K_HTC_NORMAL;
@@ -253,30 +310,123 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
253 } 310 }
254} 311}
255 312
313static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
314{
315 bool *beacon_configured = (bool *)data;
316 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
317
318 if (vif->type == NL80211_IFTYPE_STATION &&
319 avp->beacon_configured)
320 *beacon_configured = true;
321}
322
323static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
324 struct ieee80211_vif *vif)
325{
326 struct ath_common *common = ath9k_hw_common(priv->ah);
327 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
328 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
329 bool beacon_configured;
330
331 /*
332 * Changing the beacon interval when multiple AP interfaces
333 * are configured will affect beacon transmission of all
334 * of them.
335 */
336 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
337 (priv->num_ap_vif > 1) &&
338 (vif->type == NL80211_IFTYPE_AP) &&
339 (cur_conf->beacon_interval != bss_conf->beacon_int)) {
340 ath_dbg(common, ATH_DBG_CONFIG,
341 "Changing beacon interval of multiple AP interfaces !\n");
342 return false;
343 }
344
345 /*
346 * If the HW is operating in AP mode, any new station interfaces that
347 * are added cannot change the beacon parameters.
348 */
349 if (priv->num_ap_vif &&
350 (vif->type != NL80211_IFTYPE_AP)) {
351 ath_dbg(common, ATH_DBG_CONFIG,
352 "HW in AP mode, cannot set STA beacon parameters\n");
353 return false;
354 }
355
356 /*
357 * The beacon parameters are configured only for the first
358 * station interface.
359 */
360 if ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
361 (priv->num_sta_vif > 1) &&
362 (vif->type == NL80211_IFTYPE_STATION)) {
363 beacon_configured = false;
364 ieee80211_iterate_active_interfaces_atomic(priv->hw,
365 ath9k_htc_beacon_iter,
366 &beacon_configured);
367
368 if (beacon_configured) {
369 ath_dbg(common, ATH_DBG_CONFIG,
370 "Beacon already configured for a station interface\n");
371 return false;
372 }
373 }
374
375 return true;
376}
377
256void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, 378void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
257 struct ieee80211_vif *vif) 379 struct ieee80211_vif *vif)
258{ 380{
259 struct ath_common *common = ath9k_hw_common(priv->ah); 381 struct ath_common *common = ath9k_hw_common(priv->ah);
260 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; 382 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
261 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 383 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
384 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
385
386 if (!ath9k_htc_check_beacon_config(priv, vif))
387 return;
262 388
263 cur_conf->beacon_interval = bss_conf->beacon_int; 389 cur_conf->beacon_interval = bss_conf->beacon_int;
264 if (cur_conf->beacon_interval == 0) 390 if (cur_conf->beacon_interval == 0)
265 cur_conf->beacon_interval = 100; 391 cur_conf->beacon_interval = 100;
266 392
267 cur_conf->dtim_period = bss_conf->dtim_period; 393 cur_conf->dtim_period = bss_conf->dtim_period;
268 cur_conf->listen_interval = 1;
269 cur_conf->dtim_count = 1;
270 cur_conf->bmiss_timeout = 394 cur_conf->bmiss_timeout =
271 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; 395 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
272 396
273 switch (vif->type) { 397 switch (vif->type) {
274 case NL80211_IFTYPE_STATION: 398 case NL80211_IFTYPE_STATION:
275 ath9k_htc_beacon_config_sta(priv, cur_conf); 399 ath9k_htc_beacon_config_sta(priv, cur_conf);
400 avp->beacon_configured = true;
401 break;
402 case NL80211_IFTYPE_ADHOC:
403 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
404 break;
405 case NL80211_IFTYPE_AP:
406 ath9k_htc_beacon_config_ap(priv, cur_conf);
407 break;
408 default:
409 ath_dbg(common, ATH_DBG_CONFIG,
410 "Unsupported beaconing mode\n");
411 return;
412 }
413}
414
415void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
416{
417 struct ath_common *common = ath9k_hw_common(priv->ah);
418 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
419
420 switch (priv->ah->opmode) {
421 case NL80211_IFTYPE_STATION:
422 ath9k_htc_beacon_config_sta(priv, cur_conf);
276 break; 423 break;
277 case NL80211_IFTYPE_ADHOC: 424 case NL80211_IFTYPE_ADHOC:
278 ath9k_htc_beacon_config_adhoc(priv, cur_conf); 425 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
279 break; 426 break;
427 case NL80211_IFTYPE_AP:
428 ath9k_htc_beacon_config_ap(priv, cur_conf);
429 break;
280 default: 430 default:
281 ath_dbg(common, ATH_DBG_CONFIG, 431 ath_dbg(common, ATH_DBG_CONFIG,
282 "Unsupported beaconing mode\n"); 432 "Unsupported beaconing mode\n");
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index fe70f67aa088..7e630a81b453 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -389,7 +389,8 @@ void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
389 ret, ah->curchan->channel); 389 ret, ah->curchan->channel);
390 } 390 }
391 391
392 ath_update_txpow(priv); 392 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
393 &priv->curtxpow);
393 394
394 /* Start RX */ 395 /* Start RX */
395 WMI_CMD(WMI_START_RECV_CMDID); 396 WMI_CMD(WMI_START_RECV_CMDID);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 0352f0994caa..fc67c937e172 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -294,6 +294,34 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
294 return be32_to_cpu(val); 294 return be32_to_cpu(val);
295} 295}
296 296
297static void ath9k_multi_regread(void *hw_priv, u32 *addr,
298 u32 *val, u16 count)
299{
300 struct ath_hw *ah = (struct ath_hw *) hw_priv;
301 struct ath_common *common = ath9k_hw_common(ah);
302 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
303 __be32 tmpaddr[8];
304 __be32 tmpval[8];
305 int i, ret;
306
307 for (i = 0; i < count; i++) {
308 tmpaddr[i] = cpu_to_be32(addr[i]);
309 }
310
311 ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
312 (u8 *)tmpaddr , sizeof(u32) * count,
313 (u8 *)tmpval, sizeof(u32) * count,
314 100);
315 if (unlikely(ret)) {
316 ath_dbg(common, ATH_DBG_WMI,
317 "Multiple REGISTER READ FAILED (count: %d)\n", count);
318 }
319
320 for (i = 0; i < count; i++) {
321 val[i] = be32_to_cpu(tmpval[i]);
322 }
323}
324
297static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset) 325static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
298{ 326{
299 struct ath_hw *ah = (struct ath_hw *) hw_priv; 327 struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -404,6 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
404 432
405static const struct ath_ops ath9k_common_ops = { 433static const struct ath_ops ath9k_common_ops = {
406 .read = ath9k_regread, 434 .read = ath9k_regread,
435 .multi_read = ath9k_multi_regread,
407 .write = ath9k_regwrite, 436 .write = ath9k_regwrite,
408 .enable_write_buffer = ath9k_enable_regwrite_buffer, 437 .enable_write_buffer = ath9k_enable_regwrite_buffer,
409 .write_flush = ath9k_regwrite_flush, 438 .write_flush = ath9k_regwrite_flush,
@@ -650,7 +679,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
650 (unsigned long)priv); 679 (unsigned long)priv);
651 tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, 680 tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet,
652 (unsigned long)priv); 681 (unsigned long)priv);
653 INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work); 682 INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
654 INIT_WORK(&priv->ps_work, ath9k_ps_work); 683 INIT_WORK(&priv->ps_work, ath9k_ps_work);
655 INIT_WORK(&priv->fatal_work, ath9k_fatal_work); 684 INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
656 685
@@ -758,6 +787,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
758 struct ath_hw *ah; 787 struct ath_hw *ah;
759 int error = 0; 788 int error = 0;
760 struct ath_regulatory *reg; 789 struct ath_regulatory *reg;
790 char hw_name[64];
761 791
762 /* Bring up device */ 792 /* Bring up device */
763 error = ath9k_init_priv(priv, devid, product, drv_info); 793 error = ath9k_init_priv(priv, devid, product, drv_info);
@@ -798,6 +828,22 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
798 goto err_world; 828 goto err_world;
799 } 829 }
800 830
831 ath_dbg(common, ATH_DBG_CONFIG,
832 "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
833 "BE:%d, BK:%d, VI:%d, VO:%d\n",
834 priv->wmi_cmd_ep,
835 priv->beacon_ep,
836 priv->cab_ep,
837 priv->uapsd_ep,
838 priv->mgmt_ep,
839 priv->data_be_ep,
840 priv->data_bk_ep,
841 priv->data_vi_ep,
842 priv->data_vo_ep);
843
844 ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name));
845 wiphy_info(hw->wiphy, "%s\n", hw_name);
846
801 ath9k_init_leds(priv); 847 ath9k_init_leds(priv);
802 ath9k_start_rfkill_poll(priv); 848 ath9k_start_rfkill_poll(priv);
803 849
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 6bb59958f71e..db8c0c044e9e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,17 +24,6 @@ static struct dentry *ath9k_debugfs_root;
24/* Utilities */ 24/* Utilities */
25/*************/ 25/*************/
26 26
27void ath_update_txpow(struct ath9k_htc_priv *priv)
28{
29 struct ath_hw *ah = priv->ah;
30
31 if (priv->curtxpow != priv->txpowlimit) {
32 ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
33 /* read back in case value is clamped */
34 priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
35 }
36}
37
38/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */ 27/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
39static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv, 28static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
40 struct ath9k_channel *ichan) 29 struct ath9k_channel *ichan)
@@ -116,12 +105,88 @@ void ath9k_ps_work(struct work_struct *work)
116 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP); 105 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
117} 106}
118 107
108static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
109{
110 struct ath9k_htc_priv *priv = data;
111 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
112
113 if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon)
114 priv->reconfig_beacon = true;
115
116 if (bss_conf->assoc) {
117 priv->rearm_ani = true;
118 priv->reconfig_beacon = true;
119 }
120}
121
122static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
123{
124 priv->rearm_ani = false;
125 priv->reconfig_beacon = false;
126
127 ieee80211_iterate_active_interfaces_atomic(priv->hw,
128 ath9k_htc_vif_iter, priv);
129 if (priv->rearm_ani)
130 ath9k_htc_start_ani(priv);
131
132 if (priv->reconfig_beacon) {
133 ath9k_htc_ps_wakeup(priv);
134 ath9k_htc_beacon_reconfig(priv);
135 ath9k_htc_ps_restore(priv);
136 }
137}
138
139static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
140{
141 struct ath9k_vif_iter_data *iter_data = data;
142 int i;
143
144 for (i = 0; i < ETH_ALEN; i++)
145 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
146}
147
148static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
149 struct ieee80211_vif *vif)
150{
151 struct ath_common *common = ath9k_hw_common(priv->ah);
152 struct ath9k_vif_iter_data iter_data;
153
154 /*
155 * Use the hardware MAC address as reference, the hardware uses it
156 * together with the BSSID mask when matching addresses.
157 */
158 iter_data.hw_macaddr = common->macaddr;
159 memset(&iter_data.mask, 0xff, ETH_ALEN);
160
161 if (vif)
162 ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
163
164 /* Get list of all active MAC addresses */
165 ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter,
166 &iter_data);
167
168 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
169 ath_hw_setbssidmask(common);
170}
171
172static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
173{
174 if (priv->num_ibss_vif)
175 priv->ah->opmode = NL80211_IFTYPE_ADHOC;
176 else if (priv->num_ap_vif)
177 priv->ah->opmode = NL80211_IFTYPE_AP;
178 else
179 priv->ah->opmode = NL80211_IFTYPE_STATION;
180
181 ath9k_hw_setopmode(priv->ah);
182}
183
119void ath9k_htc_reset(struct ath9k_htc_priv *priv) 184void ath9k_htc_reset(struct ath9k_htc_priv *priv)
120{ 185{
121 struct ath_hw *ah = priv->ah; 186 struct ath_hw *ah = priv->ah;
122 struct ath_common *common = ath9k_hw_common(ah); 187 struct ath_common *common = ath9k_hw_common(ah);
123 struct ieee80211_channel *channel = priv->hw->conf.channel; 188 struct ieee80211_channel *channel = priv->hw->conf.channel;
124 struct ath9k_hw_cal_data *caldata; 189 struct ath9k_hw_cal_data *caldata = NULL;
125 enum htc_phymode mode; 190 enum htc_phymode mode;
126 __be16 htc_mode; 191 __be16 htc_mode;
127 u8 cmd_rsp; 192 u8 cmd_rsp;
@@ -130,16 +195,14 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
130 mutex_lock(&priv->mutex); 195 mutex_lock(&priv->mutex);
131 ath9k_htc_ps_wakeup(priv); 196 ath9k_htc_ps_wakeup(priv);
132 197
133 if (priv->op_flags & OP_ASSOCIATED) 198 ath9k_htc_stop_ani(priv);
134 cancel_delayed_work_sync(&priv->ath9k_ani_work);
135
136 ieee80211_stop_queues(priv->hw); 199 ieee80211_stop_queues(priv->hw);
137 htc_stop(priv->htc); 200 htc_stop(priv->htc);
138 WMI_CMD(WMI_DISABLE_INTR_CMDID); 201 WMI_CMD(WMI_DISABLE_INTR_CMDID);
139 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 202 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
140 WMI_CMD(WMI_STOP_RECV_CMDID); 203 WMI_CMD(WMI_STOP_RECV_CMDID);
141 204
142 caldata = &priv->caldata[channel->hw_value]; 205 caldata = &priv->caldata;
143 ret = ath9k_hw_reset(ah, ah->curchan, caldata, false); 206 ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
144 if (ret) { 207 if (ret) {
145 ath_err(common, 208 ath_err(common,
@@ -147,7 +210,8 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
147 channel->center_freq, ret); 210 channel->center_freq, ret);
148 } 211 }
149 212
150 ath_update_txpow(priv); 213 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
214 &priv->curtxpow);
151 215
152 WMI_CMD(WMI_START_RECV_CMDID); 216 WMI_CMD(WMI_START_RECV_CMDID);
153 ath9k_host_rx_init(priv); 217 ath9k_host_rx_init(priv);
@@ -158,12 +222,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
158 222
159 WMI_CMD(WMI_ENABLE_INTR_CMDID); 223 WMI_CMD(WMI_ENABLE_INTR_CMDID);
160 htc_start(priv->htc); 224 htc_start(priv->htc);
161 225 ath9k_htc_vif_reconfig(priv);
162 if (priv->op_flags & OP_ASSOCIATED) {
163 ath9k_htc_beacon_config(priv, priv->vif);
164 ath_start_ani(priv);
165 }
166
167 ieee80211_wake_queues(priv->hw); 226 ieee80211_wake_queues(priv->hw);
168 227
169 ath9k_htc_ps_restore(priv); 228 ath9k_htc_ps_restore(priv);
@@ -179,7 +238,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
179 struct ieee80211_conf *conf = &common->hw->conf; 238 struct ieee80211_conf *conf = &common->hw->conf;
180 bool fastcc; 239 bool fastcc;
181 struct ieee80211_channel *channel = hw->conf.channel; 240 struct ieee80211_channel *channel = hw->conf.channel;
182 struct ath9k_hw_cal_data *caldata; 241 struct ath9k_hw_cal_data *caldata = NULL;
183 enum htc_phymode mode; 242 enum htc_phymode mode;
184 __be16 htc_mode; 243 __be16 htc_mode;
185 u8 cmd_rsp; 244 u8 cmd_rsp;
@@ -202,7 +261,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
202 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf), 261 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
203 fastcc); 262 fastcc);
204 263
205 caldata = &priv->caldata[channel->hw_value]; 264 if (!fastcc)
265 caldata = &priv->caldata;
206 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); 266 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
207 if (ret) { 267 if (ret) {
208 ath_err(common, 268 ath_err(common,
@@ -211,7 +271,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
211 goto err; 271 goto err;
212 } 272 }
213 273
214 ath_update_txpow(priv); 274 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
275 &priv->curtxpow);
215 276
216 WMI_CMD(WMI_START_RECV_CMDID); 277 WMI_CMD(WMI_START_RECV_CMDID);
217 if (ret) 278 if (ret)
@@ -230,11 +291,23 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
230 goto err; 291 goto err;
231 292
232 htc_start(priv->htc); 293 htc_start(priv->htc);
294
295 if (!(priv->op_flags & OP_SCANNING) &&
296 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
297 ath9k_htc_vif_reconfig(priv);
298
233err: 299err:
234 ath9k_htc_ps_restore(priv); 300 ath9k_htc_ps_restore(priv);
235 return ret; 301 return ret;
236} 302}
237 303
304/*
305 * Monitor mode handling is a tad complicated because the firmware requires
306 * an interface to be created exclusively, while mac80211 doesn't associate
307 * an interface with the mode.
308 *
309 * So, for now, only one monitor interface can be configured.
310 */
238static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) 311static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
239{ 312{
240 struct ath_common *common = ath9k_hw_common(priv->ah); 313 struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -244,9 +317,10 @@ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
244 317
245 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 318 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
246 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); 319 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
247 hvif.index = 0; /* Should do for now */ 320 hvif.index = priv->mon_vif_idx;
248 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); 321 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
249 priv->nvifs--; 322 priv->nvifs--;
323 priv->vif_slot &= ~(1 << priv->mon_vif_idx);
250} 324}
251 325
252static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) 326static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
@@ -254,70 +328,87 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
254 struct ath_common *common = ath9k_hw_common(priv->ah); 328 struct ath_common *common = ath9k_hw_common(priv->ah);
255 struct ath9k_htc_target_vif hvif; 329 struct ath9k_htc_target_vif hvif;
256 struct ath9k_htc_target_sta tsta; 330 struct ath9k_htc_target_sta tsta;
257 int ret = 0; 331 int ret = 0, sta_idx;
258 u8 cmd_rsp; 332 u8 cmd_rsp;
259 333
260 if (priv->nvifs > 0) 334 if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) ||
261 return -ENOBUFS; 335 (priv->nstations >= ATH9K_HTC_MAX_STA)) {
336 ret = -ENOBUFS;
337 goto err_vif;
338 }
262 339
263 if (priv->nstations >= ATH9K_HTC_MAX_STA) 340 sta_idx = ffz(priv->sta_slot);
264 return -ENOBUFS; 341 if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) {
342 ret = -ENOBUFS;
343 goto err_vif;
344 }
265 345
266 /* 346 /*
267 * Add an interface. 347 * Add an interface.
268 */ 348 */
269
270 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 349 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
271 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); 350 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
272 351
273 hvif.opmode = cpu_to_be32(HTC_M_MONITOR); 352 hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
274 priv->ah->opmode = NL80211_IFTYPE_MONITOR; 353 hvif.index = ffz(priv->vif_slot);
275 hvif.index = priv->nvifs;
276 354
277 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); 355 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
278 if (ret) 356 if (ret)
279 return ret; 357 goto err_vif;
358
359 /*
360 * Assign the monitor interface index as a special case here.
361 * This is needed when the interface is brought down.
362 */
363 priv->mon_vif_idx = hvif.index;
364 priv->vif_slot |= (1 << hvif.index);
365
366 /*
367 * Set the hardware mode to monitor only if there are no
368 * other interfaces.
369 */
370 if (!priv->nvifs)
371 priv->ah->opmode = NL80211_IFTYPE_MONITOR;
280 372
281 priv->nvifs++; 373 priv->nvifs++;
282 374
283 /* 375 /*
284 * Associate a station with the interface for packet injection. 376 * Associate a station with the interface for packet injection.
285 */ 377 */
286
287 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta)); 378 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
288 379
289 memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN); 380 memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN);
290 381
291 tsta.is_vif_sta = 1; 382 tsta.is_vif_sta = 1;
292 tsta.sta_index = priv->nstations; 383 tsta.sta_index = sta_idx;
293 tsta.vif_index = hvif.index; 384 tsta.vif_index = hvif.index;
294 tsta.maxampdu = 0xffff; 385 tsta.maxampdu = 0xffff;
295 386
296 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); 387 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
297 if (ret) { 388 if (ret) {
298 ath_err(common, "Unable to add station entry for monitor mode\n"); 389 ath_err(common, "Unable to add station entry for monitor mode\n");
299 goto err_vif; 390 goto err_sta;
300 } 391 }
301 392
393 priv->sta_slot |= (1 << sta_idx);
302 priv->nstations++; 394 priv->nstations++;
303 395 priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
304 /*
305 * Set chainmask etc. on the target.
306 */
307 ret = ath9k_htc_update_cap_target(priv);
308 if (ret)
309 ath_dbg(common, ATH_DBG_CONFIG,
310 "Failed to update capability in target\n");
311
312 priv->ah->is_monitoring = true; 396 priv->ah->is_monitoring = true;
313 397
398 ath_dbg(common, ATH_DBG_CONFIG,
399 "Attached a monitor interface at idx: %d, sta idx: %d\n",
400 priv->mon_vif_idx, sta_idx);
401
314 return 0; 402 return 0;
315 403
316err_vif: 404err_sta:
317 /* 405 /*
318 * Remove the interface from the target. 406 * Remove the interface from the target.
319 */ 407 */
320 __ath9k_htc_remove_monitor_interface(priv); 408 __ath9k_htc_remove_monitor_interface(priv);
409err_vif:
410 ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
411
321 return ret; 412 return ret;
322} 413}
323 414
@@ -329,7 +420,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
329 420
330 __ath9k_htc_remove_monitor_interface(priv); 421 __ath9k_htc_remove_monitor_interface(priv);
331 422
332 sta_idx = 0; /* Only single interface, for now */ 423 sta_idx = priv->vif_sta_pos[priv->mon_vif_idx];
333 424
334 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx); 425 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
335 if (ret) { 426 if (ret) {
@@ -337,9 +428,14 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
337 return ret; 428 return ret;
338 } 429 }
339 430
431 priv->sta_slot &= ~(1 << sta_idx);
340 priv->nstations--; 432 priv->nstations--;
341 priv->ah->is_monitoring = false; 433 priv->ah->is_monitoring = false;
342 434
435 ath_dbg(common, ATH_DBG_CONFIG,
436 "Removed a monitor interface at idx: %d, sta idx: %d\n",
437 priv->mon_vif_idx, sta_idx);
438
343 return 0; 439 return 0;
344} 440}
345 441
@@ -351,12 +447,16 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
351 struct ath9k_htc_target_sta tsta; 447 struct ath9k_htc_target_sta tsta;
352 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; 448 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
353 struct ath9k_htc_sta *ista; 449 struct ath9k_htc_sta *ista;
354 int ret; 450 int ret, sta_idx;
355 u8 cmd_rsp; 451 u8 cmd_rsp;
356 452
357 if (priv->nstations >= ATH9K_HTC_MAX_STA) 453 if (priv->nstations >= ATH9K_HTC_MAX_STA)
358 return -ENOBUFS; 454 return -ENOBUFS;
359 455
456 sta_idx = ffz(priv->sta_slot);
457 if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA))
458 return -ENOBUFS;
459
360 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta)); 460 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
361 461
362 if (sta) { 462 if (sta) {
@@ -366,13 +466,13 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
366 tsta.associd = common->curaid; 466 tsta.associd = common->curaid;
367 tsta.is_vif_sta = 0; 467 tsta.is_vif_sta = 0;
368 tsta.valid = true; 468 tsta.valid = true;
369 ista->index = priv->nstations; 469 ista->index = sta_idx;
370 } else { 470 } else {
371 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN); 471 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
372 tsta.is_vif_sta = 1; 472 tsta.is_vif_sta = 1;
373 } 473 }
374 474
375 tsta.sta_index = priv->nstations; 475 tsta.sta_index = sta_idx;
376 tsta.vif_index = avp->index; 476 tsta.vif_index = avp->index;
377 tsta.maxampdu = 0xffff; 477 tsta.maxampdu = 0xffff;
378 if (sta && sta->ht_cap.ht_supported) 478 if (sta && sta->ht_cap.ht_supported)
@@ -387,12 +487,21 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
387 return ret; 487 return ret;
388 } 488 }
389 489
390 if (sta) 490 if (sta) {
391 ath_dbg(common, ATH_DBG_CONFIG, 491 ath_dbg(common, ATH_DBG_CONFIG,
392 "Added a station entry for: %pM (idx: %d)\n", 492 "Added a station entry for: %pM (idx: %d)\n",
393 sta->addr, tsta.sta_index); 493 sta->addr, tsta.sta_index);
494 } else {
495 ath_dbg(common, ATH_DBG_CONFIG,
496 "Added a station entry for VIF %d (idx: %d)\n",
497 avp->index, tsta.sta_index);
498 }
394 499
500 priv->sta_slot |= (1 << sta_idx);
395 priv->nstations++; 501 priv->nstations++;
502 if (!sta)
503 priv->vif_sta_pos[avp->index] = sta_idx;
504
396 return 0; 505 return 0;
397} 506}
398 507
@@ -401,6 +510,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
401 struct ieee80211_sta *sta) 510 struct ieee80211_sta *sta)
402{ 511{
403 struct ath_common *common = ath9k_hw_common(priv->ah); 512 struct ath_common *common = ath9k_hw_common(priv->ah);
513 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
404 struct ath9k_htc_sta *ista; 514 struct ath9k_htc_sta *ista;
405 int ret; 515 int ret;
406 u8 cmd_rsp, sta_idx; 516 u8 cmd_rsp, sta_idx;
@@ -409,7 +519,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
409 ista = (struct ath9k_htc_sta *) sta->drv_priv; 519 ista = (struct ath9k_htc_sta *) sta->drv_priv;
410 sta_idx = ista->index; 520 sta_idx = ista->index;
411 } else { 521 } else {
412 sta_idx = 0; 522 sta_idx = priv->vif_sta_pos[avp->index];
413 } 523 }
414 524
415 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx); 525 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
@@ -421,12 +531,19 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
421 return ret; 531 return ret;
422 } 532 }
423 533
424 if (sta) 534 if (sta) {
425 ath_dbg(common, ATH_DBG_CONFIG, 535 ath_dbg(common, ATH_DBG_CONFIG,
426 "Removed a station entry for: %pM (idx: %d)\n", 536 "Removed a station entry for: %pM (idx: %d)\n",
427 sta->addr, sta_idx); 537 sta->addr, sta_idx);
538 } else {
539 ath_dbg(common, ATH_DBG_CONFIG,
540 "Removed a station entry for VIF %d (idx: %d)\n",
541 avp->index, sta_idx);
542 }
428 543
544 priv->sta_slot &= ~(1 << sta_idx);
429 priv->nstations--; 545 priv->nstations--;
546
430 return 0; 547 return 0;
431} 548}
432 549
@@ -808,7 +925,7 @@ void ath9k_htc_debug_remove_root(void)
808/* ANI */ 925/* ANI */
809/*******/ 926/*******/
810 927
811void ath_start_ani(struct ath9k_htc_priv *priv) 928void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
812{ 929{
813 struct ath_common *common = ath9k_hw_common(priv->ah); 930 struct ath_common *common = ath9k_hw_common(priv->ah);
814 unsigned long timestamp = jiffies_to_msecs(jiffies); 931 unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -817,15 +934,22 @@ void ath_start_ani(struct ath9k_htc_priv *priv)
817 common->ani.shortcal_timer = timestamp; 934 common->ani.shortcal_timer = timestamp;
818 common->ani.checkani_timer = timestamp; 935 common->ani.checkani_timer = timestamp;
819 936
820 ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work, 937 priv->op_flags |= OP_ANI_RUNNING;
938
939 ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
821 msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 940 msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
822} 941}
823 942
824void ath9k_ani_work(struct work_struct *work) 943void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
944{
945 cancel_delayed_work_sync(&priv->ani_work);
946 priv->op_flags &= ~OP_ANI_RUNNING;
947}
948
949void ath9k_htc_ani_work(struct work_struct *work)
825{ 950{
826 struct ath9k_htc_priv *priv = 951 struct ath9k_htc_priv *priv =
827 container_of(work, struct ath9k_htc_priv, 952 container_of(work, struct ath9k_htc_priv, ani_work.work);
828 ath9k_ani_work.work);
829 struct ath_hw *ah = priv->ah; 953 struct ath_hw *ah = priv->ah;
830 struct ath_common *common = ath9k_hw_common(ah); 954 struct ath_common *common = ath9k_hw_common(ah);
831 bool longcal = false; 955 bool longcal = false;
@@ -834,7 +958,8 @@ void ath9k_ani_work(struct work_struct *work)
834 unsigned int timestamp = jiffies_to_msecs(jiffies); 958 unsigned int timestamp = jiffies_to_msecs(jiffies);
835 u32 cal_interval, short_cal_interval; 959 u32 cal_interval, short_cal_interval;
836 960
837 short_cal_interval = ATH_STA_SHORT_CALINTERVAL; 961 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
962 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
838 963
839 /* Only calibrate if awake */ 964 /* Only calibrate if awake */
840 if (ah->power_mode != ATH9K_PM_AWAKE) 965 if (ah->power_mode != ATH9K_PM_AWAKE)
@@ -903,7 +1028,7 @@ set_timer:
903 if (!common->ani.caldone) 1028 if (!common->ani.caldone)
904 cal_interval = min(cal_interval, (u32)short_cal_interval); 1029 cal_interval = min(cal_interval, (u32)short_cal_interval);
905 1030
906 ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work, 1031 ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
907 msecs_to_jiffies(cal_interval)); 1032 msecs_to_jiffies(cal_interval));
908} 1033}
909 1034
@@ -911,7 +1036,7 @@ set_timer:
911/* mac80211 Callbacks */ 1036/* mac80211 Callbacks */
912/**********************/ 1037/**********************/
913 1038
914static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1039static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
915{ 1040{
916 struct ieee80211_hdr *hdr; 1041 struct ieee80211_hdr *hdr;
917 struct ath9k_htc_priv *priv = hw->priv; 1042 struct ath9k_htc_priv *priv = hw->priv;
@@ -924,7 +1049,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
924 padsize = padpos & 3; 1049 padsize = padpos & 3;
925 if (padsize && skb->len > padpos) { 1050 if (padsize && skb->len > padpos) {
926 if (skb_headroom(skb) < padsize) 1051 if (skb_headroom(skb) < padsize)
927 return -1; 1052 goto fail_tx;
928 skb_push(skb, padsize); 1053 skb_push(skb, padsize);
929 memmove(skb->data, skb->data + padsize, padpos); 1054 memmove(skb->data, skb->data + padsize, padpos);
930 } 1055 }
@@ -945,11 +1070,10 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
945 goto fail_tx; 1070 goto fail_tx;
946 } 1071 }
947 1072
948 return 0; 1073 return;
949 1074
950fail_tx: 1075fail_tx:
951 dev_kfree_skb_any(skb); 1076 dev_kfree_skb_any(skb);
952 return 0;
953} 1077}
954 1078
955static int ath9k_htc_start(struct ieee80211_hw *hw) 1079static int ath9k_htc_start(struct ieee80211_hw *hw)
@@ -987,7 +1111,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
987 return ret; 1111 return ret;
988 } 1112 }
989 1113
990 ath_update_txpow(priv); 1114 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
1115 &priv->curtxpow);
991 1116
992 mode = ath9k_htc_get_curmode(priv, init_channel); 1117 mode = ath9k_htc_get_curmode(priv, init_channel);
993 htc_mode = cpu_to_be16(mode); 1118 htc_mode = cpu_to_be16(mode);
@@ -997,6 +1122,11 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
997 1122
998 ath9k_host_rx_init(priv); 1123 ath9k_host_rx_init(priv);
999 1124
1125 ret = ath9k_htc_update_cap_target(priv);
1126 if (ret)
1127 ath_dbg(common, ATH_DBG_CONFIG,
1128 "Failed to update capability in target\n");
1129
1000 priv->op_flags &= ~OP_INVALID; 1130 priv->op_flags &= ~OP_INVALID;
1001 htc_start(priv->htc); 1131 htc_start(priv->htc);
1002 1132
@@ -1051,25 +1181,21 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1051 cancel_work_sync(&priv->fatal_work); 1181 cancel_work_sync(&priv->fatal_work);
1052 cancel_work_sync(&priv->ps_work); 1182 cancel_work_sync(&priv->ps_work);
1053 cancel_delayed_work_sync(&priv->ath9k_led_blink_work); 1183 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1184 ath9k_htc_stop_ani(priv);
1054 ath9k_led_stop_brightness(priv); 1185 ath9k_led_stop_brightness(priv);
1055 1186
1056 mutex_lock(&priv->mutex); 1187 mutex_lock(&priv->mutex);
1057 1188
1058 /* Remove monitor interface here */
1059 if (ah->opmode == NL80211_IFTYPE_MONITOR) {
1060 if (ath9k_htc_remove_monitor_interface(priv))
1061 ath_err(common, "Unable to remove monitor interface\n");
1062 else
1063 ath_dbg(common, ATH_DBG_CONFIG,
1064 "Monitor interface removed\n");
1065 }
1066
1067 if (ah->btcoex_hw.enabled) { 1189 if (ah->btcoex_hw.enabled) {
1068 ath9k_hw_btcoex_disable(ah); 1190 ath9k_hw_btcoex_disable(ah);
1069 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 1191 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1070 ath_htc_cancel_btcoex_work(priv); 1192 ath_htc_cancel_btcoex_work(priv);
1071 } 1193 }
1072 1194
1195 /* Remove a monitor interface if it's present. */
1196 if (priv->ah->is_monitoring)
1197 ath9k_htc_remove_monitor_interface(priv);
1198
1073 ath9k_hw_phy_disable(ah); 1199 ath9k_hw_phy_disable(ah);
1074 ath9k_hw_disable(ah); 1200 ath9k_hw_disable(ah);
1075 ath9k_htc_ps_restore(priv); 1201 ath9k_htc_ps_restore(priv);
@@ -1093,10 +1219,24 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1093 1219
1094 mutex_lock(&priv->mutex); 1220 mutex_lock(&priv->mutex);
1095 1221
1096 /* Only one interface for now */ 1222 if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
1097 if (priv->nvifs > 0) { 1223 mutex_unlock(&priv->mutex);
1098 ret = -ENOBUFS; 1224 return -ENOBUFS;
1099 goto out; 1225 }
1226
1227 if (priv->num_ibss_vif ||
1228 (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
1229 ath_err(common, "IBSS coexistence with other modes is not allowed\n");
1230 mutex_unlock(&priv->mutex);
1231 return -ENOBUFS;
1232 }
1233
1234 if (((vif->type == NL80211_IFTYPE_AP) ||
1235 (vif->type == NL80211_IFTYPE_ADHOC)) &&
1236 ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
1237 ath_err(common, "Max. number of beaconing interfaces reached\n");
1238 mutex_unlock(&priv->mutex);
1239 return -ENOBUFS;
1100 } 1240 }
1101 1241
1102 ath9k_htc_ps_wakeup(priv); 1242 ath9k_htc_ps_wakeup(priv);
@@ -1110,6 +1250,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1110 case NL80211_IFTYPE_ADHOC: 1250 case NL80211_IFTYPE_ADHOC:
1111 hvif.opmode = cpu_to_be32(HTC_M_IBSS); 1251 hvif.opmode = cpu_to_be32(HTC_M_IBSS);
1112 break; 1252 break;
1253 case NL80211_IFTYPE_AP:
1254 hvif.opmode = cpu_to_be32(HTC_M_HOSTAP);
1255 break;
1113 default: 1256 default:
1114 ath_err(common, 1257 ath_err(common,
1115 "Interface type %d not yet supported\n", vif->type); 1258 "Interface type %d not yet supported\n", vif->type);
@@ -1117,34 +1260,39 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1117 goto out; 1260 goto out;
1118 } 1261 }
1119 1262
1120 ath_dbg(common, ATH_DBG_CONFIG,
1121 "Attach a VIF of type: %d\n", vif->type);
1122
1123 priv->ah->opmode = vif->type;
1124
1125 /* Index starts from zero on the target */ 1263 /* Index starts from zero on the target */
1126 avp->index = hvif.index = priv->nvifs; 1264 avp->index = hvif.index = ffz(priv->vif_slot);
1127 hvif.rtsthreshold = cpu_to_be16(2304); 1265 hvif.rtsthreshold = cpu_to_be16(2304);
1128 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); 1266 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
1129 if (ret) 1267 if (ret)
1130 goto out; 1268 goto out;
1131 1269
1132 priv->nvifs++;
1133
1134 /* 1270 /*
1135 * We need a node in target to tx mgmt frames 1271 * We need a node in target to tx mgmt frames
1136 * before association. 1272 * before association.
1137 */ 1273 */
1138 ret = ath9k_htc_add_station(priv, vif, NULL); 1274 ret = ath9k_htc_add_station(priv, vif, NULL);
1139 if (ret) 1275 if (ret) {
1276 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
1140 goto out; 1277 goto out;
1278 }
1141 1279
1142 ret = ath9k_htc_update_cap_target(priv); 1280 ath9k_htc_set_bssid_mask(priv, vif);
1143 if (ret)
1144 ath_dbg(common, ATH_DBG_CONFIG,
1145 "Failed to update capability in target\n");
1146 1281
1282 priv->vif_slot |= (1 << avp->index);
1283 priv->nvifs++;
1147 priv->vif = vif; 1284 priv->vif = vif;
1285
1286 INC_VIF(priv, vif->type);
1287 ath9k_htc_set_opmode(priv);
1288
1289 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
1290 !(priv->op_flags & OP_ANI_RUNNING))
1291 ath9k_htc_start_ani(priv);
1292
1293 ath_dbg(common, ATH_DBG_CONFIG,
1294 "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
1295
1148out: 1296out:
1149 ath9k_htc_ps_restore(priv); 1297 ath9k_htc_ps_restore(priv);
1150 mutex_unlock(&priv->mutex); 1298 mutex_unlock(&priv->mutex);
@@ -1162,8 +1310,6 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1162 int ret = 0; 1310 int ret = 0;
1163 u8 cmd_rsp; 1311 u8 cmd_rsp;
1164 1312
1165 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
1166
1167 mutex_lock(&priv->mutex); 1313 mutex_lock(&priv->mutex);
1168 ath9k_htc_ps_wakeup(priv); 1314 ath9k_htc_ps_wakeup(priv);
1169 1315
@@ -1172,10 +1318,27 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1172 hvif.index = avp->index; 1318 hvif.index = avp->index;
1173 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); 1319 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
1174 priv->nvifs--; 1320 priv->nvifs--;
1321 priv->vif_slot &= ~(1 << avp->index);
1175 1322
1176 ath9k_htc_remove_station(priv, vif, NULL); 1323 ath9k_htc_remove_station(priv, vif, NULL);
1177 priv->vif = NULL; 1324 priv->vif = NULL;
1178 1325
1326 DEC_VIF(priv, vif->type);
1327 ath9k_htc_set_opmode(priv);
1328
1329 /*
1330 * Stop ANI only if there are no associated station interfaces.
1331 */
1332 if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
1333 priv->rearm_ani = false;
1334 ieee80211_iterate_active_interfaces_atomic(priv->hw,
1335 ath9k_htc_vif_iter, priv);
1336 if (!priv->rearm_ani)
1337 ath9k_htc_stop_ani(priv);
1338 }
1339
1340 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
1341
1179 ath9k_htc_ps_restore(priv); 1342 ath9k_htc_ps_restore(priv);
1180 mutex_unlock(&priv->mutex); 1343 mutex_unlock(&priv->mutex);
1181} 1344}
@@ -1211,13 +1374,11 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1211 * IEEE80211_CONF_CHANGE_CHANNEL is handled. 1374 * IEEE80211_CONF_CHANGE_CHANNEL is handled.
1212 */ 1375 */
1213 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1376 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1214 if (conf->flags & IEEE80211_CONF_MONITOR) { 1377 if ((conf->flags & IEEE80211_CONF_MONITOR) &&
1215 if (ath9k_htc_add_monitor_interface(priv)) 1378 !priv->ah->is_monitoring)
1216 ath_err(common, "Failed to set monitor mode\n"); 1379 ath9k_htc_add_monitor_interface(priv);
1217 else 1380 else if (priv->ah->is_monitoring)
1218 ath_dbg(common, ATH_DBG_CONFIG, 1381 ath9k_htc_remove_monitor_interface(priv);
1219 "HW opmode set to Monitor mode\n");
1220 }
1221 } 1382 }
1222 1383
1223 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1384 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -1252,7 +1413,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1252 1413
1253 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1414 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1254 priv->txpowlimit = 2 * conf->power_level; 1415 priv->txpowlimit = 2 * conf->power_level;
1255 ath_update_txpow(priv); 1416 ath9k_cmn_update_txpow(priv->ah, priv->curtxpow,
1417 priv->txpowlimit, &priv->curtxpow);
1256 } 1418 }
1257 1419
1258 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1420 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
@@ -1439,66 +1601,81 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1439 struct ath9k_htc_priv *priv = hw->priv; 1601 struct ath9k_htc_priv *priv = hw->priv;
1440 struct ath_hw *ah = priv->ah; 1602 struct ath_hw *ah = priv->ah;
1441 struct ath_common *common = ath9k_hw_common(ah); 1603 struct ath_common *common = ath9k_hw_common(ah);
1604 bool set_assoc;
1442 1605
1443 mutex_lock(&priv->mutex); 1606 mutex_lock(&priv->mutex);
1444 ath9k_htc_ps_wakeup(priv); 1607 ath9k_htc_ps_wakeup(priv);
1445 1608
1609 /*
1610 * Set the HW AID/BSSID only for the first station interface
1611 * or in IBSS mode.
1612 */
1613 set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) ||
1614 ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
1615 (priv->num_sta_vif == 1)));
1616
1617
1446 if (changed & BSS_CHANGED_ASSOC) { 1618 if (changed & BSS_CHANGED_ASSOC) {
1447 common->curaid = bss_conf->assoc ? 1619 if (set_assoc) {
1448 bss_conf->aid : 0; 1620 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
1449 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 1621 bss_conf->assoc);
1450 bss_conf->assoc); 1622
1451 1623 common->curaid = bss_conf->assoc ?
1452 if (bss_conf->assoc) { 1624 bss_conf->aid : 0;
1453 priv->op_flags |= OP_ASSOCIATED; 1625
1454 ath_start_ani(priv); 1626 if (bss_conf->assoc)
1455 } else { 1627 ath9k_htc_start_ani(priv);
1456 priv->op_flags &= ~OP_ASSOCIATED; 1628 else
1457 cancel_delayed_work_sync(&priv->ath9k_ani_work); 1629 ath9k_htc_stop_ani(priv);
1458 } 1630 }
1459 } 1631 }
1460 1632
1461 if (changed & BSS_CHANGED_BSSID) { 1633 if (changed & BSS_CHANGED_BSSID) {
1462 /* Set BSSID */ 1634 if (set_assoc) {
1463 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1635 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1464 ath9k_hw_write_associd(ah); 1636 ath9k_hw_write_associd(ah);
1465 1637
1466 ath_dbg(common, ATH_DBG_CONFIG, 1638 ath_dbg(common, ATH_DBG_CONFIG,
1467 "BSSID: %pM aid: 0x%x\n", 1639 "BSSID: %pM aid: 0x%x\n",
1468 common->curbssid, common->curaid); 1640 common->curbssid, common->curaid);
1641 }
1469 } 1642 }
1470 1643
1471 if ((changed & BSS_CHANGED_BEACON_INT) || 1644 if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
1472 (changed & BSS_CHANGED_BEACON) || 1645 ath_dbg(common, ATH_DBG_CONFIG,
1473 ((changed & BSS_CHANGED_BEACON_ENABLED) && 1646 "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
1474 bss_conf->enable_beacon)) {
1475 priv->op_flags |= OP_ENABLE_BEACON; 1647 priv->op_flags |= OP_ENABLE_BEACON;
1476 ath9k_htc_beacon_config(priv, vif); 1648 ath9k_htc_beacon_config(priv, vif);
1477 } 1649 }
1478 1650
1479 if ((changed & BSS_CHANGED_BEACON_ENABLED) && 1651 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
1480 !bss_conf->enable_beacon) { 1652 /*
1481 priv->op_flags &= ~OP_ENABLE_BEACON; 1653 * Disable SWBA interrupt only if there are no
1482 ath9k_htc_beacon_config(priv, vif); 1654 * AP/IBSS interfaces.
1483 } 1655 */
1484 1656 if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
1485 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 1657 ath_dbg(common, ATH_DBG_CONFIG,
1486 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", 1658 "Beacon disabled for BSS: %pM\n",
1487 bss_conf->use_short_preamble); 1659 bss_conf->bssid);
1488 if (bss_conf->use_short_preamble) 1660 priv->op_flags &= ~OP_ENABLE_BEACON;
1489 priv->op_flags |= OP_PREAMBLE_SHORT; 1661 ath9k_htc_beacon_config(priv, vif);
1490 else 1662 }
1491 priv->op_flags &= ~OP_PREAMBLE_SHORT;
1492 } 1663 }
1493 1664
1494 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 1665 if (changed & BSS_CHANGED_BEACON_INT) {
1495 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", 1666 /*
1496 bss_conf->use_cts_prot); 1667 * Reset the HW TSF for the first AP interface.
1497 if (bss_conf->use_cts_prot && 1668 */
1498 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 1669 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
1499 priv->op_flags |= OP_PROTECT_ENABLE; 1670 (priv->nvifs == 1) &&
1500 else 1671 (priv->num_ap_vif == 1) &&
1501 priv->op_flags &= ~OP_PROTECT_ENABLE; 1672 (vif->type == NL80211_IFTYPE_AP)) {
1673 priv->op_flags |= OP_TSF_RESET;
1674 }
1675 ath_dbg(common, ATH_DBG_CONFIG,
1676 "Beacon interval changed for BSS: %pM\n",
1677 bss_conf->bssid);
1678 ath9k_htc_beacon_config(priv, vif);
1502 } 1679 }
1503 1680
1504 if (changed & BSS_CHANGED_ERP_SLOT) { 1681 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1557,12 +1734,14 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1557 struct ieee80211_vif *vif, 1734 struct ieee80211_vif *vif,
1558 enum ieee80211_ampdu_mlme_action action, 1735 enum ieee80211_ampdu_mlme_action action,
1559 struct ieee80211_sta *sta, 1736 struct ieee80211_sta *sta,
1560 u16 tid, u16 *ssn) 1737 u16 tid, u16 *ssn, u8 buf_size)
1561{ 1738{
1562 struct ath9k_htc_priv *priv = hw->priv; 1739 struct ath9k_htc_priv *priv = hw->priv;
1563 struct ath9k_htc_sta *ista; 1740 struct ath9k_htc_sta *ista;
1564 int ret = 0; 1741 int ret = 0;
1565 1742
1743 mutex_lock(&priv->mutex);
1744
1566 switch (action) { 1745 switch (action) {
1567 case IEEE80211_AMPDU_RX_START: 1746 case IEEE80211_AMPDU_RX_START:
1568 break; 1747 break;
@@ -1587,6 +1766,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1587 ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n"); 1766 ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
1588 } 1767 }
1589 1768
1769 mutex_unlock(&priv->mutex);
1770
1590 return ret; 1771 return ret;
1591} 1772}
1592 1773
@@ -1599,8 +1780,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1599 priv->op_flags |= OP_SCANNING; 1780 priv->op_flags |= OP_SCANNING;
1600 spin_unlock_bh(&priv->beacon_lock); 1781 spin_unlock_bh(&priv->beacon_lock);
1601 cancel_work_sync(&priv->ps_work); 1782 cancel_work_sync(&priv->ps_work);
1602 if (priv->op_flags & OP_ASSOCIATED) 1783 ath9k_htc_stop_ani(priv);
1603 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1604 mutex_unlock(&priv->mutex); 1784 mutex_unlock(&priv->mutex);
1605} 1785}
1606 1786
@@ -1609,14 +1789,11 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1609 struct ath9k_htc_priv *priv = hw->priv; 1789 struct ath9k_htc_priv *priv = hw->priv;
1610 1790
1611 mutex_lock(&priv->mutex); 1791 mutex_lock(&priv->mutex);
1612 ath9k_htc_ps_wakeup(priv);
1613 spin_lock_bh(&priv->beacon_lock); 1792 spin_lock_bh(&priv->beacon_lock);
1614 priv->op_flags &= ~OP_SCANNING; 1793 priv->op_flags &= ~OP_SCANNING;
1615 spin_unlock_bh(&priv->beacon_lock); 1794 spin_unlock_bh(&priv->beacon_lock);
1616 if (priv->op_flags & OP_ASSOCIATED) { 1795 ath9k_htc_ps_wakeup(priv);
1617 ath9k_htc_beacon_config(priv, priv->vif); 1796 ath9k_htc_vif_reconfig(priv);
1618 ath_start_ani(priv);
1619 }
1620 ath9k_htc_ps_restore(priv); 1797 ath9k_htc_ps_restore(priv);
1621 mutex_unlock(&priv->mutex); 1798 mutex_unlock(&priv->mutex);
1622} 1799}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 7a5ffca21958..4a4f27ba96af 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -84,7 +84,9 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
84 struct ieee80211_hdr *hdr; 84 struct ieee80211_hdr *hdr;
85 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 85 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
86 struct ieee80211_sta *sta = tx_info->control.sta; 86 struct ieee80211_sta *sta = tx_info->control.sta;
87 struct ieee80211_vif *vif = tx_info->control.vif;
87 struct ath9k_htc_sta *ista; 88 struct ath9k_htc_sta *ista;
89 struct ath9k_htc_vif *avp;
88 struct ath9k_htc_tx_ctl tx_ctl; 90 struct ath9k_htc_tx_ctl tx_ctl;
89 enum htc_endpoint_id epid; 91 enum htc_endpoint_id epid;
90 u16 qnum; 92 u16 qnum;
@@ -95,18 +97,31 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
95 hdr = (struct ieee80211_hdr *) skb->data; 97 hdr = (struct ieee80211_hdr *) skb->data;
96 fc = hdr->frame_control; 98 fc = hdr->frame_control;
97 99
98 if (tx_info->control.vif && 100 /*
99 (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv) 101 * Find out on which interface this packet has to be
100 vif_idx = ((struct ath9k_htc_vif *) 102 * sent out.
101 tx_info->control.vif->drv_priv)->index; 103 */
102 else 104 if (vif) {
103 vif_idx = priv->nvifs; 105 avp = (struct ath9k_htc_vif *) vif->drv_priv;
106 vif_idx = avp->index;
107 } else {
108 if (!priv->ah->is_monitoring) {
109 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
110 "VIF is null, but no monitor interface !\n");
111 return -EINVAL;
112 }
104 113
114 vif_idx = priv->mon_vif_idx;
115 }
116
117 /*
118 * Find out which station this packet is destined for.
119 */
105 if (sta) { 120 if (sta) {
106 ista = (struct ath9k_htc_sta *) sta->drv_priv; 121 ista = (struct ath9k_htc_sta *) sta->drv_priv;
107 sta_idx = ista->index; 122 sta_idx = ista->index;
108 } else { 123 } else {
109 sta_idx = 0; 124 sta_idx = priv->vif_sta_pos[vif_idx];
110 } 125 }
111 126
112 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl)); 127 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
@@ -141,7 +156,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
141 156
142 /* CTS-to-self */ 157 /* CTS-to-self */
143 if (!(flags & ATH9K_HTC_TX_RTSCTS) && 158 if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
144 (priv->op_flags & OP_PROTECT_ENABLE)) 159 (vif && vif->bss_conf.use_cts_prot))
145 flags |= ATH9K_HTC_TX_CTSONLY; 160 flags |= ATH9K_HTC_TX_CTSONLY;
146 161
147 tx_hdr.flags = cpu_to_be32(flags); 162 tx_hdr.flags = cpu_to_be32(flags);
@@ -217,6 +232,7 @@ static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
217void ath9k_tx_tasklet(unsigned long data) 232void ath9k_tx_tasklet(unsigned long data)
218{ 233{
219 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 234 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
235 struct ieee80211_vif *vif;
220 struct ieee80211_sta *sta; 236 struct ieee80211_sta *sta;
221 struct ieee80211_hdr *hdr; 237 struct ieee80211_hdr *hdr;
222 struct ieee80211_tx_info *tx_info; 238 struct ieee80211_tx_info *tx_info;
@@ -228,12 +244,16 @@ void ath9k_tx_tasklet(unsigned long data)
228 hdr = (struct ieee80211_hdr *) skb->data; 244 hdr = (struct ieee80211_hdr *) skb->data;
229 fc = hdr->frame_control; 245 fc = hdr->frame_control;
230 tx_info = IEEE80211_SKB_CB(skb); 246 tx_info = IEEE80211_SKB_CB(skb);
247 vif = tx_info->control.vif;
231 248
232 memset(&tx_info->status, 0, sizeof(tx_info->status)); 249 memset(&tx_info->status, 0, sizeof(tx_info->status));
233 250
251 if (!vif)
252 goto send_mac80211;
253
234 rcu_read_lock(); 254 rcu_read_lock();
235 255
236 sta = ieee80211_find_sta(priv->vif, hdr->addr1); 256 sta = ieee80211_find_sta(vif, hdr->addr1);
237 if (!sta) { 257 if (!sta) {
238 rcu_read_unlock(); 258 rcu_read_unlock();
239 ieee80211_tx_status(priv->hw, skb); 259 ieee80211_tx_status(priv->hw, skb);
@@ -263,6 +283,7 @@ void ath9k_tx_tasklet(unsigned long data)
263 283
264 rcu_read_unlock(); 284 rcu_read_unlock();
265 285
286 send_mac80211:
266 /* Send status to mac80211 */ 287 /* Send status to mac80211 */
267 ieee80211_tx_status(priv->hw, skb); 288 ieee80211_tx_status(priv->hw, skb);
268 } 289 }
@@ -386,7 +407,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
386 */ 407 */
387 if (((ah->opmode != NL80211_IFTYPE_AP) && 408 if (((ah->opmode != NL80211_IFTYPE_AP) &&
388 (priv->rxfilter & FIF_PROMISC_IN_BSS)) || 409 (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
389 (ah->opmode == NL80211_IFTYPE_MONITOR)) 410 ah->is_monitoring)
390 rfilt |= ATH9K_RX_FILTER_PROM; 411 rfilt |= ATH9K_RX_FILTER_PROM;
391 412
392 if (priv->rxfilter & FIF_CONTROL) 413 if (priv->rxfilter & FIF_CONTROL)
@@ -398,8 +419,13 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
398 else 419 else
399 rfilt |= ATH9K_RX_FILTER_BEACON; 420 rfilt |= ATH9K_RX_FILTER_BEACON;
400 421
401 if (conf_is_ht(&priv->hw->conf)) 422 if (conf_is_ht(&priv->hw->conf)) {
402 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 423 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
424 rfilt |= ATH9K_RX_FILTER_UNCOMP_BA_BAR;
425 }
426
427 if (priv->rxfilter & FIF_PSPOLL)
428 rfilt |= ATH9K_RX_FILTER_PSPOLL;
403 429
404 return rfilt; 430 return rfilt;
405 431
@@ -412,20 +438,12 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
412static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv) 438static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
413{ 439{
414 struct ath_hw *ah = priv->ah; 440 struct ath_hw *ah = priv->ah;
415 struct ath_common *common = ath9k_hw_common(ah);
416
417 u32 rfilt, mfilt[2]; 441 u32 rfilt, mfilt[2];
418 442
419 /* configure rx filter */ 443 /* configure rx filter */
420 rfilt = ath9k_htc_calcrxfilter(priv); 444 rfilt = ath9k_htc_calcrxfilter(priv);
421 ath9k_hw_setrxfilter(ah, rfilt); 445 ath9k_hw_setrxfilter(ah, rfilt);
422 446
423 /* configure bssid mask */
424 ath_hw_setbssidmask(common);
425
426 /* configure operational mode */
427 ath9k_hw_setopmode(ah);
428
429 /* calculate and install multicast filter */ 447 /* calculate and install multicast filter */
430 mfilt[0] = mfilt[1] = ~0; 448 mfilt[0] = mfilt[1] = ~0;
431 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 449 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -576,31 +594,29 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
576 ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate, 594 ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
577 rxbuf->rxstatus.rs_flags); 595 rxbuf->rxstatus.rs_flags);
578 596
579 if (priv->op_flags & OP_ASSOCIATED) { 597 if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
580 if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD && 598 !rxbuf->rxstatus.rs_moreaggr)
581 !rxbuf->rxstatus.rs_moreaggr) 599 ATH_RSSI_LPF(priv->rx.last_rssi,
582 ATH_RSSI_LPF(priv->rx.last_rssi, 600 rxbuf->rxstatus.rs_rssi);
583 rxbuf->rxstatus.rs_rssi);
584 601
585 last_rssi = priv->rx.last_rssi; 602 last_rssi = priv->rx.last_rssi;
586 603
587 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 604 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
588 rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi, 605 rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
589 ATH_RSSI_EP_MULTIPLIER); 606 ATH_RSSI_EP_MULTIPLIER);
590 607
591 if (rxbuf->rxstatus.rs_rssi < 0) 608 if (rxbuf->rxstatus.rs_rssi < 0)
592 rxbuf->rxstatus.rs_rssi = 0; 609 rxbuf->rxstatus.rs_rssi = 0;
593 610
594 if (ieee80211_is_beacon(fc)) 611 if (ieee80211_is_beacon(fc))
595 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi; 612 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
596 }
597 613
598 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp); 614 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
599 rx_status->band = hw->conf.channel->band; 615 rx_status->band = hw->conf.channel->band;
600 rx_status->freq = hw->conf.channel->center_freq; 616 rx_status->freq = hw->conf.channel->center_freq;
601 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR; 617 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
602 rx_status->antenna = rxbuf->rxstatus.rs_antenna; 618 rx_status->antenna = rxbuf->rxstatus.rs_antenna;
603 rx_status->flag |= RX_FLAG_TSFT; 619 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
604 620
605 return true; 621 return true;
606 622
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9f01e50d5cda..338b07502f1a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
495 if (ah->hw_version.devid == AR5416_AR9100_DEVID) 495 if (ah->hw_version.devid == AR5416_AR9100_DEVID)
496 ah->hw_version.macVersion = AR_SREV_VERSION_9100; 496 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
497 497
498 ath9k_hw_read_revisions(ah);
499
500 /*
501 * Read back AR_WA into a permanent copy and set bits 14 and 17.
502 * We need to do this to avoid RMW of this register. We cannot
503 * read the reg when chip is asleep.
504 */
505 ah->WARegVal = REG_READ(ah, AR_WA);
506 ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
507 AR_WA_ASPM_TIMER_BASED_DISABLE);
508
498 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 509 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
499 ath_err(common, "Couldn't reset chip\n"); 510 ath_err(common, "Couldn't reset chip\n");
500 return -EIO; 511 return -EIO;
@@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
563 574
564 ath9k_hw_init_mode_regs(ah); 575 ath9k_hw_init_mode_regs(ah);
565 576
566 /*
567 * Read back AR_WA into a permanent copy and set bits 14 and 17.
568 * We need to do this to avoid RMW of this register. We cannot
569 * read the reg when chip is asleep.
570 */
571 ah->WARegVal = REG_READ(ah, AR_WA);
572 ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
573 AR_WA_ASPM_TIMER_BASED_DISABLE);
574 577
575 if (ah->is_pciexpress) 578 if (ah->is_pciexpress)
576 ath9k_hw_configpcipowersave(ah, 0, 0); 579 ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -668,14 +671,51 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
668 REGWRITE_BUFFER_FLUSH(ah); 671 REGWRITE_BUFFER_FLUSH(ah);
669} 672}
670 673
674unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
675{
676 REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
677 udelay(100);
678 REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
679
680 while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
681 udelay(100);
682
683 return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
684}
685EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
686
687#define DPLL2_KD_VAL 0x3D
688#define DPLL2_KI_VAL 0x06
689#define DPLL3_PHASE_SHIFT_VAL 0x1
690
671static void ath9k_hw_init_pll(struct ath_hw *ah, 691static void ath9k_hw_init_pll(struct ath_hw *ah,
672 struct ath9k_channel *chan) 692 struct ath9k_channel *chan)
673{ 693{
674 u32 pll; 694 u32 pll;
675 695
676 if (AR_SREV_9485(ah)) 696 if (AR_SREV_9485(ah)) {
697 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
698 REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
699
700 REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
701 AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
702
703 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
704 udelay(1000);
705
677 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666); 706 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
678 707
708 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
709 AR_CH0_DPLL2_KD, DPLL2_KD_VAL);
710 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
711 AR_CH0_DPLL2_KI, DPLL2_KI_VAL);
712
713 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
714 AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
715 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
716 udelay(1000);
717 }
718
679 pll = ath9k_hw_compute_pll_control(ah, chan); 719 pll = ath9k_hw_compute_pll_control(ah, chan);
680 720
681 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 721 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
@@ -1060,7 +1100,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1060 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1100 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1061 1101
1062 REG_WRITE(ah, AR_RTC_RESET, 0); 1102 REG_WRITE(ah, AR_RTC_RESET, 0);
1063 udelay(2);
1064 1103
1065 REGWRITE_BUFFER_FLUSH(ah); 1104 REGWRITE_BUFFER_FLUSH(ah);
1066 1105
@@ -1082,8 +1121,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1082 return false; 1121 return false;
1083 } 1122 }
1084 1123
1085 ath9k_hw_read_revisions(ah);
1086
1087 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); 1124 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1088} 1125}
1089 1126
@@ -1348,8 +1385,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1348 ath9k_hw_spur_mitigate_freq(ah, chan); 1385 ath9k_hw_spur_mitigate_freq(ah, chan);
1349 ah->eep_ops->set_board_values(ah, chan); 1386 ah->eep_ops->set_board_values(ah, chan);
1350 1387
1351 ath9k_hw_set_operating_mode(ah, ah->opmode);
1352
1353 ENABLE_REGWRITE_BUFFER(ah); 1388 ENABLE_REGWRITE_BUFFER(ah);
1354 1389
1355 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr)); 1390 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
@@ -1367,6 +1402,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1367 1402
1368 REGWRITE_BUFFER_FLUSH(ah); 1403 REGWRITE_BUFFER_FLUSH(ah);
1369 1404
1405 ath9k_hw_set_operating_mode(ah, ah->opmode);
1406
1370 r = ath9k_hw_rf_set_freq(ah, chan); 1407 r = ath9k_hw_rf_set_freq(ah, chan);
1371 if (r) 1408 if (r)
1372 return r; 1409 return r;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index ea9fde670646..6650fd48415c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -70,6 +70,9 @@
70#define REG_READ(_ah, _reg) \ 70#define REG_READ(_ah, _reg) \
71 ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) 71 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
72 72
73#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \
74 ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt))
75
73#define ENABLE_REGWRITE_BUFFER(_ah) \ 76#define ENABLE_REGWRITE_BUFFER(_ah) \
74 do { \ 77 do { \
75 if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \ 78 if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \
@@ -92,9 +95,9 @@
92#define REG_READ_FIELD(_a, _r, _f) \ 95#define REG_READ_FIELD(_a, _r, _f) \
93 (((REG_READ(_a, _r) & _f) >> _f##_S)) 96 (((REG_READ(_a, _r) & _f) >> _f##_S))
94#define REG_SET_BIT(_a, _r, _f) \ 97#define REG_SET_BIT(_a, _r, _f) \
95 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f) 98 REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f))
96#define REG_CLR_BIT(_a, _r, _f) \ 99#define REG_CLR_BIT(_a, _r, _f) \
97 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f) 100 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f))
98 101
99#define DO_DELAY(x) do { \ 102#define DO_DELAY(x) do { \
100 if ((++(x) % 64) == 0) \ 103 if ((++(x) % 64) == 0) \
@@ -926,6 +929,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
926void ath9k_hw_reset_tsf(struct ath_hw *ah); 929void ath9k_hw_reset_tsf(struct ath_hw *ah);
927void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 930void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
928void ath9k_hw_init_global_settings(struct ath_hw *ah); 931void ath9k_hw_init_global_settings(struct ath_hw *ah);
932unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
929void ath9k_hw_set11nmac2040(struct ath_hw *ah); 933void ath9k_hw_set11nmac2040(struct ath_hw *ah);
930void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 934void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
931void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 935void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index a033d01bf8a0..79aec983279f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -140,6 +140,21 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
140 RATE(540, 0x0c, 0), 140 RATE(540, 0x0c, 0),
141}; 141};
142 142
143#ifdef CONFIG_MAC80211_LEDS
144static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
145 { .throughput = 0 * 1024, .blink_time = 334 },
146 { .throughput = 1 * 1024, .blink_time = 260 },
147 { .throughput = 5 * 1024, .blink_time = 220 },
148 { .throughput = 10 * 1024, .blink_time = 190 },
149 { .throughput = 20 * 1024, .blink_time = 170 },
150 { .throughput = 50 * 1024, .blink_time = 150 },
151 { .throughput = 70 * 1024, .blink_time = 130 },
152 { .throughput = 100 * 1024, .blink_time = 110 },
153 { .throughput = 200 * 1024, .blink_time = 80 },
154 { .throughput = 300 * 1024, .blink_time = 50 },
155};
156#endif
157
143static void ath9k_deinit_softc(struct ath_softc *sc); 158static void ath9k_deinit_softc(struct ath_softc *sc);
144 159
145/* 160/*
@@ -250,8 +265,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
250 struct regulatory_request *request) 265 struct regulatory_request *request)
251{ 266{
252 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 267 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
253 struct ath_wiphy *aphy = hw->priv; 268 struct ath_softc *sc = hw->priv;
254 struct ath_softc *sc = aphy->sc;
255 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah); 269 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
256 270
257 return ath_reg_notifier_apply(wiphy, request, reg); 271 return ath_reg_notifier_apply(wiphy, request, reg);
@@ -438,9 +452,10 @@ static int ath9k_init_queues(struct ath_softc *sc)
438 sc->config.cabqReadytime = ATH_CABQ_READY_TIME; 452 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
439 ath_cabq_update(sc); 453 ath_cabq_update(sc);
440 454
441 for (i = 0; i < WME_NUM_AC; i++) 455 for (i = 0; i < WME_NUM_AC; i++) {
442 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); 456 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
443 457 sc->tx.txq_map[i]->mac80211_qnum = i;
458 }
444 return 0; 459 return 0;
445} 460}
446 461
@@ -512,10 +527,8 @@ static void ath9k_init_misc(struct ath_softc *sc)
512 527
513 sc->beacon.slottime = ATH9K_SLOT_TIME_9; 528 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
514 529
515 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 530 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
516 sc->beacon.bslot[i] = NULL; 531 sc->beacon.bslot[i] = NULL;
517 sc->beacon.bslot_aphy[i] = NULL;
518 }
519 532
520 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) 533 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
521 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT; 534 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
@@ -533,6 +546,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
533 if (!ah) 546 if (!ah)
534 return -ENOMEM; 547 return -ENOMEM;
535 548
549 ah->hw = sc->hw;
536 ah->hw_version.devid = devid; 550 ah->hw_version.devid = devid;
537 ah->hw_version.subsysid = subsysid; 551 ah->hw_version.subsysid = subsysid;
538 sc->sc_ah = ah; 552 sc->sc_ah = ah;
@@ -550,10 +564,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
550 common->btcoex_enabled = ath9k_btcoex_enable == 1; 564 common->btcoex_enabled = ath9k_btcoex_enable == 1;
551 spin_lock_init(&common->cc_lock); 565 spin_lock_init(&common->cc_lock);
552 566
553 spin_lock_init(&sc->wiphy_lock);
554 spin_lock_init(&sc->sc_serial_rw); 567 spin_lock_init(&sc->sc_serial_rw);
555 spin_lock_init(&sc->sc_pm_lock); 568 spin_lock_init(&sc->sc_pm_lock);
556 mutex_init(&sc->mutex); 569 mutex_init(&sc->mutex);
570#ifdef CONFIG_ATH9K_DEBUGFS
571 spin_lock_init(&sc->nodes_lock);
572 INIT_LIST_HEAD(&sc->nodes);
573#endif
557 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); 574 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
558 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, 575 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
559 (unsigned long)sc); 576 (unsigned long)sc);
@@ -695,7 +712,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
695 const struct ath_bus_ops *bus_ops) 712 const struct ath_bus_ops *bus_ops)
696{ 713{
697 struct ieee80211_hw *hw = sc->hw; 714 struct ieee80211_hw *hw = sc->hw;
698 struct ath_wiphy *aphy = hw->priv;
699 struct ath_common *common; 715 struct ath_common *common;
700 struct ath_hw *ah; 716 struct ath_hw *ah;
701 int error = 0; 717 int error = 0;
@@ -730,6 +746,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
730 746
731 ath9k_init_txpower_limits(sc); 747 ath9k_init_txpower_limits(sc);
732 748
749#ifdef CONFIG_MAC80211_LEDS
750 /* must be initialized before ieee80211_register_hw */
751 sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
752 IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
753 ARRAY_SIZE(ath9k_tpt_blink));
754#endif
755
733 /* Register with mac80211 */ 756 /* Register with mac80211 */
734 error = ieee80211_register_hw(hw); 757 error = ieee80211_register_hw(hw);
735 if (error) 758 if (error)
@@ -750,10 +773,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
750 773
751 INIT_WORK(&sc->hw_check_work, ath_hw_check); 774 INIT_WORK(&sc->hw_check_work, ath_hw_check);
752 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate); 775 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
753 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); 776 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
754 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
755 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
756 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
757 777
758 ath_init_leds(sc); 778 ath_init_leds(sc);
759 ath_start_rfkill_poll(sc); 779 ath_start_rfkill_poll(sc);
@@ -805,7 +825,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
805void ath9k_deinit_device(struct ath_softc *sc) 825void ath9k_deinit_device(struct ath_softc *sc)
806{ 826{
807 struct ieee80211_hw *hw = sc->hw; 827 struct ieee80211_hw *hw = sc->hw;
808 int i = 0;
809 828
810 ath9k_ps_wakeup(sc); 829 ath9k_ps_wakeup(sc);
811 830
@@ -814,20 +833,10 @@ void ath9k_deinit_device(struct ath_softc *sc)
814 833
815 ath9k_ps_restore(sc); 834 ath9k_ps_restore(sc);
816 835
817 for (i = 0; i < sc->num_sec_wiphy; i++) {
818 struct ath_wiphy *aphy = sc->sec_wiphy[i];
819 if (aphy == NULL)
820 continue;
821 sc->sec_wiphy[i] = NULL;
822 ieee80211_unregister_hw(aphy->hw);
823 ieee80211_free_hw(aphy->hw);
824 }
825
826 ieee80211_unregister_hw(hw); 836 ieee80211_unregister_hw(hw);
827 ath_rx_cleanup(sc); 837 ath_rx_cleanup(sc);
828 ath_tx_cleanup(sc); 838 ath_tx_cleanup(sc);
829 ath9k_deinit_softc(sc); 839 ath9k_deinit_softc(sc);
830 kfree(sc->sec_wiphy);
831} 840}
832 841
833void ath_descdma_cleanup(struct ath_softc *sc, 842void ath_descdma_cleanup(struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 2915b11edefb..562257ac52cf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -143,84 +143,59 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
143} 143}
144EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel); 144EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
145 145
146bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) 146void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
147{ 147{
148#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */ 148 int i, q;
149#define ATH9K_TIME_QUANTUM 100 /* usec */
150 struct ath_common *common = ath9k_hw_common(ah);
151 struct ath9k_hw_capabilities *pCap = &ah->caps;
152 struct ath9k_tx_queue_info *qi;
153 u32 tsfLow, j, wait;
154 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
155 149
156 if (q >= pCap->total_queues) { 150 REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
157 ath_dbg(common, ATH_DBG_QUEUE,
158 "Stopping TX DMA, invalid queue: %u\n", q);
159 return false;
160 }
161 151
162 qi = &ah->txq[q]; 152 REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
163 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 153 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
164 ath_dbg(common, ATH_DBG_QUEUE, 154 REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
165 "Stopping TX DMA, inactive queue: %u\n", q);
166 return false;
167 }
168 155
169 REG_WRITE(ah, AR_Q_TXD, 1 << q); 156 for (q = 0; q < AR_NUM_QCU; q++) {
157 for (i = 0; i < 1000; i++) {
158 if (i)
159 udelay(5);
170 160
171 for (wait = wait_time; wait != 0; wait--) { 161 if (!ath9k_hw_numtxpending(ah, q))
172 if (ath9k_hw_numtxpending(ah, q) == 0) 162 break;
173 break; 163 }
174 udelay(ATH9K_TIME_QUANTUM);
175 } 164 }
176 165
177 if (ath9k_hw_numtxpending(ah, q)) { 166 REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
178 ath_dbg(common, ATH_DBG_QUEUE, 167 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
179 "%s: Num of pending TX Frames %d on Q %d\n", 168 REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
180 __func__, ath9k_hw_numtxpending(ah, q), q);
181
182 for (j = 0; j < 2; j++) {
183 tsfLow = REG_READ(ah, AR_TSF_L32);
184 REG_WRITE(ah, AR_QUIET2,
185 SM(10, AR_QUIET2_QUIET_DUR));
186 REG_WRITE(ah, AR_QUIET_PERIOD, 100);
187 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
188 REG_SET_BIT(ah, AR_TIMER_MODE,
189 AR_QUIET_TIMER_EN);
190
191 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
192 break;
193 169
194 ath_dbg(common, ATH_DBG_QUEUE, 170 REG_WRITE(ah, AR_Q_TXD, 0);
195 "TSF has moved while trying to set quiet time TSF: 0x%08x\n", 171}
196 tsfLow); 172EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
197 }
198 173
199 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 174bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
175{
176#define ATH9K_TX_STOP_DMA_TIMEOUT 1000 /* usec */
177#define ATH9K_TIME_QUANTUM 100 /* usec */
178 int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
179 int wait;
200 180
201 udelay(200); 181 REG_WRITE(ah, AR_Q_TXD, 1 << q);
202 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
203 182
204 wait = wait_time; 183 for (wait = wait_time; wait != 0; wait--) {
205 while (ath9k_hw_numtxpending(ah, q)) { 184 if (wait != wait_time)
206 if ((--wait) == 0) {
207 ath_err(common,
208 "Failed to stop TX DMA in 100 msec after killing last frame\n");
209 break;
210 }
211 udelay(ATH9K_TIME_QUANTUM); 185 udelay(ATH9K_TIME_QUANTUM);
212 }
213 186
214 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 187 if (ath9k_hw_numtxpending(ah, q) == 0)
188 break;
215 } 189 }
216 190
217 REG_WRITE(ah, AR_Q_TXD, 0); 191 REG_WRITE(ah, AR_Q_TXD, 0);
192
218 return wait != 0; 193 return wait != 0;
219 194
220#undef ATH9K_TX_STOP_DMA_TIMEOUT 195#undef ATH9K_TX_STOP_DMA_TIMEOUT
221#undef ATH9K_TIME_QUANTUM 196#undef ATH9K_TIME_QUANTUM
222} 197}
223EXPORT_SYMBOL(ath9k_hw_stoptxdma); 198EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
224 199
225void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs) 200void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
226{ 201{
@@ -690,17 +665,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
690 rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; 665 rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
691 666
692 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { 667 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
668 /*
669 * Treat these errors as mutually exclusive to avoid spurious
670 * extra error reports from the hardware. If a CRC error is
671 * reported, then decryption and MIC errors are irrelevant,
672 * the frame is going to be dropped either way
673 */
693 if (ads.ds_rxstatus8 & AR_CRCErr) 674 if (ads.ds_rxstatus8 & AR_CRCErr)
694 rs->rs_status |= ATH9K_RXERR_CRC; 675 rs->rs_status |= ATH9K_RXERR_CRC;
695 if (ads.ds_rxstatus8 & AR_PHYErr) { 676 else if (ads.ds_rxstatus8 & AR_PHYErr) {
696 rs->rs_status |= ATH9K_RXERR_PHY; 677 rs->rs_status |= ATH9K_RXERR_PHY;
697 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); 678 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
698 rs->rs_phyerr = phyerr; 679 rs->rs_phyerr = phyerr;
699 } 680 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
700 if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
701 rs->rs_status |= ATH9K_RXERR_DECRYPT; 681 rs->rs_status |= ATH9K_RXERR_DECRYPT;
702 if (ads.ds_rxstatus8 & AR_MichaelErr) 682 else if (ads.ds_rxstatus8 & AR_MichaelErr)
703 rs->rs_status |= ATH9K_RXERR_MIC; 683 rs->rs_status |= ATH9K_RXERR_MIC;
684
704 if (ads.ds_rxstatus8 & AR_KeyMiss) 685 if (ads.ds_rxstatus8 & AR_KeyMiss)
705 rs->rs_status |= ATH9K_RXERR_DECRYPT; 686 rs->rs_status |= ATH9K_RXERR_DECRYPT;
706 } 687 }
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 7512f97e8f49..b2b2ff852c32 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -639,6 +639,8 @@ enum ath9k_rx_filter {
639 ATH9K_RX_FILTER_PHYERR = 0x00000100, 639 ATH9K_RX_FILTER_PHYERR = 0x00000100,
640 ATH9K_RX_FILTER_MYBEACON = 0x00000200, 640 ATH9K_RX_FILTER_MYBEACON = 0x00000200,
641 ATH9K_RX_FILTER_COMP_BAR = 0x00000400, 641 ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
642 ATH9K_RX_FILTER_COMP_BA = 0x00000800,
643 ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000,
642 ATH9K_RX_FILTER_PSPOLL = 0x00004000, 644 ATH9K_RX_FILTER_PSPOLL = 0x00004000,
643 ATH9K_RX_FILTER_PHYRADAR = 0x00002000, 645 ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
644 ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000, 646 ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
@@ -674,7 +676,8 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
674void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds); 676void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
675u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q); 677u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
676bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel); 678bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
677bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q); 679bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
680void ath9k_hw_abort_tx_dma(struct ath_hw *ah);
678void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs); 681void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
679bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, 682bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
680 const struct ath9k_tx_queue_info *qinfo); 683 const struct ath9k_tx_queue_info *qinfo);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a09d15f7aa6e..115f162c617a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -15,20 +15,10 @@
15 */ 15 */
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/delay.h>
18#include "ath9k.h" 19#include "ath9k.h"
19#include "btcoex.h" 20#include "btcoex.h"
20 21
21static void ath_update_txpow(struct ath_softc *sc)
22{
23 struct ath_hw *ah = sc->sc_ah;
24
25 if (sc->curtxpow != sc->config.txpowlimit) {
26 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
27 /* read back in case value is clamped */
28 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
29 }
30}
31
32static u8 parse_mpdudensity(u8 mpdudensity) 22static u8 parse_mpdudensity(u8 mpdudensity)
33{ 23{
34 /* 24 /*
@@ -64,17 +54,19 @@ static u8 parse_mpdudensity(u8 mpdudensity)
64 } 54 }
65} 55}
66 56
67static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc, 57static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
68 struct ieee80211_hw *hw)
69{ 58{
70 struct ieee80211_channel *curchan = hw->conf.channel; 59 bool pending = false;
71 struct ath9k_channel *channel; 60
72 u8 chan_idx; 61 spin_lock_bh(&txq->axq_lock);
73 62
74 chan_idx = curchan->hw_value; 63 if (txq->axq_depth || !list_empty(&txq->axq_acq))
75 channel = &sc->sc_ah->channels[chan_idx]; 64 pending = true;
76 ath9k_update_ichannel(sc, hw, channel); 65 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
77 return channel; 66 pending = !list_empty(&txq->txq_fifo_pending);
67
68 spin_unlock_bh(&txq->axq_lock);
69 return pending;
78} 70}
79 71
80bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) 72bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
@@ -177,7 +169,12 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel)
177 } 169 }
178} 170}
179 171
180static void ath_update_survey_stats(struct ath_softc *sc) 172/*
173 * Updates the survey statistics and returns the busy time since last
174 * update in %, if the measurement duration was long enough for the
175 * result to be useful, -1 otherwise.
176 */
177static int ath_update_survey_stats(struct ath_softc *sc)
181{ 178{
182 struct ath_hw *ah = sc->sc_ah; 179 struct ath_hw *ah = sc->sc_ah;
183 struct ath_common *common = ath9k_hw_common(ah); 180 struct ath_common *common = ath9k_hw_common(ah);
@@ -185,9 +182,10 @@ static void ath_update_survey_stats(struct ath_softc *sc)
185 struct survey_info *survey = &sc->survey[pos]; 182 struct survey_info *survey = &sc->survey[pos];
186 struct ath_cycle_counters *cc = &common->cc_survey; 183 struct ath_cycle_counters *cc = &common->cc_survey;
187 unsigned int div = common->clockrate * 1000; 184 unsigned int div = common->clockrate * 1000;
185 int ret = 0;
188 186
189 if (!ah->curchan) 187 if (!ah->curchan)
190 return; 188 return -1;
191 189
192 if (ah->power_mode == ATH9K_PM_AWAKE) 190 if (ah->power_mode == ATH9K_PM_AWAKE)
193 ath_hw_cycle_counters_update(common); 191 ath_hw_cycle_counters_update(common);
@@ -202,9 +200,18 @@ static void ath_update_survey_stats(struct ath_softc *sc)
202 survey->channel_time_rx += cc->rx_frame / div; 200 survey->channel_time_rx += cc->rx_frame / div;
203 survey->channel_time_tx += cc->tx_frame / div; 201 survey->channel_time_tx += cc->tx_frame / div;
204 } 202 }
203
204 if (cc->cycles < div)
205 return -1;
206
207 if (cc->cycles > 0)
208 ret = cc->rx_busy * 100 / cc->cycles;
209
205 memset(cc, 0, sizeof(*cc)); 210 memset(cc, 0, sizeof(*cc));
206 211
207 ath_update_survey_nf(sc, pos); 212 ath_update_survey_nf(sc, pos);
213
214 return ret;
208} 215}
209 216
210/* 217/*
@@ -215,7 +222,6 @@ static void ath_update_survey_stats(struct ath_softc *sc)
215int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 222int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
216 struct ath9k_channel *hchan) 223 struct ath9k_channel *hchan)
217{ 224{
218 struct ath_wiphy *aphy = hw->priv;
219 struct ath_hw *ah = sc->sc_ah; 225 struct ath_hw *ah = sc->sc_ah;
220 struct ath_common *common = ath9k_hw_common(ah); 226 struct ath_common *common = ath9k_hw_common(ah);
221 struct ieee80211_conf *conf = &common->hw->conf; 227 struct ieee80211_conf *conf = &common->hw->conf;
@@ -227,10 +233,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
227 if (sc->sc_flags & SC_OP_INVALID) 233 if (sc->sc_flags & SC_OP_INVALID)
228 return -EIO; 234 return -EIO;
229 235
236 sc->hw_busy_count = 0;
237
230 del_timer_sync(&common->ani.timer); 238 del_timer_sync(&common->ani.timer);
231 cancel_work_sync(&sc->paprd_work); 239 cancel_work_sync(&sc->paprd_work);
232 cancel_work_sync(&sc->hw_check_work); 240 cancel_work_sync(&sc->hw_check_work);
233 cancel_delayed_work_sync(&sc->tx_complete_work); 241 cancel_delayed_work_sync(&sc->tx_complete_work);
242 cancel_delayed_work_sync(&sc->hw_pll_work);
234 243
235 ath9k_ps_wakeup(sc); 244 ath9k_ps_wakeup(sc);
236 245
@@ -251,6 +260,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
251 if (!ath_stoprecv(sc)) 260 if (!ath_stoprecv(sc))
252 stopped = false; 261 stopped = false;
253 262
263 if (!ath9k_hw_check_alive(ah))
264 stopped = false;
265
254 /* XXX: do not flush receive queue here. We don't want 266 /* XXX: do not flush receive queue here. We don't want
255 * to flush data frames already in queue because of 267 * to flush data frames already in queue because of
256 * changing channel. */ 268 * changing channel. */
@@ -259,7 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
259 fastcc = false; 271 fastcc = false;
260 272
261 if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) 273 if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
262 caldata = &aphy->caldata; 274 caldata = &sc->caldata;
263 275
264 ath_dbg(common, ATH_DBG_CONFIG, 276 ath_dbg(common, ATH_DBG_CONFIG,
265 "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n", 277 "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
@@ -281,17 +293,21 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
281 goto ps_restore; 293 goto ps_restore;
282 } 294 }
283 295
284 ath_update_txpow(sc); 296 ath9k_cmn_update_txpow(ah, sc->curtxpow,
297 sc->config.txpowlimit, &sc->curtxpow);
285 ath9k_hw_set_interrupts(ah, ah->imask); 298 ath9k_hw_set_interrupts(ah, ah->imask);
286 299
287 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) { 300 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
288 if (sc->sc_flags & SC_OP_BEACONS) 301 if (sc->sc_flags & SC_OP_BEACONS)
289 ath_beacon_config(sc, NULL); 302 ath_beacon_config(sc, NULL);
290 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 303 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
304 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
291 ath_start_ani(common); 305 ath_start_ani(common);
292 } 306 }
293 307
294 ps_restore: 308 ps_restore:
309 ieee80211_wake_queues(hw);
310
295 spin_unlock_bh(&sc->sc_pcu_lock); 311 spin_unlock_bh(&sc->sc_pcu_lock);
296 312
297 ath9k_ps_restore(sc); 313 ath9k_ps_restore(sc);
@@ -549,6 +565,12 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
549 struct ath_hw *ah = sc->sc_ah; 565 struct ath_hw *ah = sc->sc_ah;
550 an = (struct ath_node *)sta->drv_priv; 566 an = (struct ath_node *)sta->drv_priv;
551 567
568#ifdef CONFIG_ATH9K_DEBUGFS
569 spin_lock(&sc->nodes_lock);
570 list_add(&an->list, &sc->nodes);
571 spin_unlock(&sc->nodes_lock);
572 an->sta = sta;
573#endif
552 if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM) 574 if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
553 sc->sc_flags |= SC_OP_ENABLE_APM; 575 sc->sc_flags |= SC_OP_ENABLE_APM;
554 576
@@ -564,6 +586,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
564{ 586{
565 struct ath_node *an = (struct ath_node *)sta->drv_priv; 587 struct ath_node *an = (struct ath_node *)sta->drv_priv;
566 588
589#ifdef CONFIG_ATH9K_DEBUGFS
590 spin_lock(&sc->nodes_lock);
591 list_del(&an->list);
592 spin_unlock(&sc->nodes_lock);
593 an->sta = NULL;
594#endif
595
567 if (sc->sc_flags & SC_OP_TXAGGR) 596 if (sc->sc_flags & SC_OP_TXAGGR)
568 ath_tx_node_cleanup(sc, an); 597 ath_tx_node_cleanup(sc, an);
569} 598}
@@ -571,17 +600,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
571void ath_hw_check(struct work_struct *work) 600void ath_hw_check(struct work_struct *work)
572{ 601{
573 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work); 602 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
574 int i; 603 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
604 unsigned long flags;
605 int busy;
575 606
576 ath9k_ps_wakeup(sc); 607 ath9k_ps_wakeup(sc);
608 if (ath9k_hw_check_alive(sc->sc_ah))
609 goto out;
577 610
578 for (i = 0; i < 3; i++) { 611 spin_lock_irqsave(&common->cc_lock, flags);
579 if (ath9k_hw_check_alive(sc->sc_ah)) 612 busy = ath_update_survey_stats(sc);
580 goto out; 613 spin_unlock_irqrestore(&common->cc_lock, flags);
581 614
582 msleep(1); 615 ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
583 } 616 "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
584 ath_reset(sc, true); 617 if (busy >= 99) {
618 if (++sc->hw_busy_count >= 3)
619 ath_reset(sc, true);
620 } else if (busy >= 0)
621 sc->hw_busy_count = 0;
585 622
586out: 623out:
587 ath9k_ps_restore(sc); 624 ath9k_ps_restore(sc);
@@ -604,7 +641,15 @@ void ath9k_tasklet(unsigned long data)
604 ath9k_ps_wakeup(sc); 641 ath9k_ps_wakeup(sc);
605 spin_lock(&sc->sc_pcu_lock); 642 spin_lock(&sc->sc_pcu_lock);
606 643
607 if (!ath9k_hw_check_alive(ah)) 644 /*
645 * Only run the baseband hang check if beacons stop working in AP or
646 * IBSS mode, because it has a high false positive rate. For station
647 * mode it should not be necessary, since the upper layers will detect
648 * this through a beacon miss automatically and the following channel
649 * change will trigger a hardware reset anyway
650 */
651 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
652 !ath9k_hw_check_alive(ah))
608 ieee80211_queue_work(sc->hw, &sc->hw_check_work); 653 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
609 654
610 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 655 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@ -783,54 +828,11 @@ chip_reset:
783#undef SCHED_INTR 828#undef SCHED_INTR
784} 829}
785 830
786static u32 ath_get_extchanmode(struct ath_softc *sc,
787 struct ieee80211_channel *chan,
788 enum nl80211_channel_type channel_type)
789{
790 u32 chanmode = 0;
791
792 switch (chan->band) {
793 case IEEE80211_BAND_2GHZ:
794 switch(channel_type) {
795 case NL80211_CHAN_NO_HT:
796 case NL80211_CHAN_HT20:
797 chanmode = CHANNEL_G_HT20;
798 break;
799 case NL80211_CHAN_HT40PLUS:
800 chanmode = CHANNEL_G_HT40PLUS;
801 break;
802 case NL80211_CHAN_HT40MINUS:
803 chanmode = CHANNEL_G_HT40MINUS;
804 break;
805 }
806 break;
807 case IEEE80211_BAND_5GHZ:
808 switch(channel_type) {
809 case NL80211_CHAN_NO_HT:
810 case NL80211_CHAN_HT20:
811 chanmode = CHANNEL_A_HT20;
812 break;
813 case NL80211_CHAN_HT40PLUS:
814 chanmode = CHANNEL_A_HT40PLUS;
815 break;
816 case NL80211_CHAN_HT40MINUS:
817 chanmode = CHANNEL_A_HT40MINUS;
818 break;
819 }
820 break;
821 default:
822 break;
823 }
824
825 return chanmode;
826}
827
828static void ath9k_bss_assoc_info(struct ath_softc *sc, 831static void ath9k_bss_assoc_info(struct ath_softc *sc,
829 struct ieee80211_hw *hw, 832 struct ieee80211_hw *hw,
830 struct ieee80211_vif *vif, 833 struct ieee80211_vif *vif,
831 struct ieee80211_bss_conf *bss_conf) 834 struct ieee80211_bss_conf *bss_conf)
832{ 835{
833 struct ath_wiphy *aphy = hw->priv;
834 struct ath_hw *ah = sc->sc_ah; 836 struct ath_hw *ah = sc->sc_ah;
835 struct ath_common *common = ath9k_hw_common(ah); 837 struct ath_common *common = ath9k_hw_common(ah);
836 838
@@ -854,7 +856,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
854 ath_beacon_config(sc, vif); 856 ath_beacon_config(sc, vif);
855 857
856 /* Reset rssi stats */ 858 /* Reset rssi stats */
857 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER; 859 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
858 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 860 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
859 861
860 sc->sc_flags |= SC_OP_ANI_RUN; 862 sc->sc_flags |= SC_OP_ANI_RUN;
@@ -881,7 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
881 ath9k_hw_configpcipowersave(ah, 0, 0); 883 ath9k_hw_configpcipowersave(ah, 0, 0);
882 884
883 if (!ah->curchan) 885 if (!ah->curchan)
884 ah->curchan = ath_get_curchannel(sc, sc->hw); 886 ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
885 887
886 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 888 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
887 if (r) { 889 if (r) {
@@ -890,7 +892,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
890 channel->center_freq, r); 892 channel->center_freq, r);
891 } 893 }
892 894
893 ath_update_txpow(sc); 895 ath9k_cmn_update_txpow(ah, sc->curtxpow,
896 sc->config.txpowlimit, &sc->curtxpow);
894 if (ath_startrecv(sc) != 0) { 897 if (ath_startrecv(sc) != 0) {
895 ath_err(common, "Unable to restart recv logic\n"); 898 ath_err(common, "Unable to restart recv logic\n");
896 goto out; 899 goto out;
@@ -907,6 +910,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
907 ath9k_hw_set_gpio(ah, ah->led_pin, 0); 910 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
908 911
909 ieee80211_wake_queues(hw); 912 ieee80211_wake_queues(hw);
913 ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2);
914
910out: 915out:
911 spin_unlock_bh(&sc->sc_pcu_lock); 916 spin_unlock_bh(&sc->sc_pcu_lock);
912 917
@@ -920,6 +925,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
920 int r; 925 int r;
921 926
922 ath9k_ps_wakeup(sc); 927 ath9k_ps_wakeup(sc);
928 cancel_delayed_work_sync(&sc->hw_pll_work);
929
923 spin_lock_bh(&sc->sc_pcu_lock); 930 spin_lock_bh(&sc->sc_pcu_lock);
924 931
925 ieee80211_stop_queues(hw); 932 ieee80211_stop_queues(hw);
@@ -942,7 +949,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
942 ath_flushrecv(sc); /* flush recv queue */ 949 ath_flushrecv(sc); /* flush recv queue */
943 950
944 if (!ah->curchan) 951 if (!ah->curchan)
945 ah->curchan = ath_get_curchannel(sc, hw); 952 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
946 953
947 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 954 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
948 if (r) { 955 if (r) {
@@ -966,6 +973,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
966 struct ieee80211_hw *hw = sc->hw; 973 struct ieee80211_hw *hw = sc->hw;
967 int r; 974 int r;
968 975
976 sc->hw_busy_count = 0;
977
969 /* Stop ANI */ 978 /* Stop ANI */
970 del_timer_sync(&common->ani.timer); 979 del_timer_sync(&common->ani.timer);
971 980
@@ -993,7 +1002,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
993 * that changes the channel so update any state that 1002 * that changes the channel so update any state that
994 * might change as a result. 1003 * might change as a result.
995 */ 1004 */
996 ath_update_txpow(sc); 1005 ath9k_cmn_update_txpow(ah, sc->curtxpow,
1006 sc->config.txpowlimit, &sc->curtxpow);
997 1007
998 if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL))) 1008 if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
999 ath_beacon_config(sc, NULL); /* restart beacons */ 1009 ath_beacon_config(sc, NULL); /* restart beacons */
@@ -1021,38 +1031,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1021 return r; 1031 return r;
1022} 1032}
1023 1033
1024/* XXX: Remove me once we don't depend on ath9k_channel for all
1025 * this redundant data */
1026void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
1027 struct ath9k_channel *ichan)
1028{
1029 struct ieee80211_channel *chan = hw->conf.channel;
1030 struct ieee80211_conf *conf = &hw->conf;
1031
1032 ichan->channel = chan->center_freq;
1033 ichan->chan = chan;
1034
1035 if (chan->band == IEEE80211_BAND_2GHZ) {
1036 ichan->chanmode = CHANNEL_G;
1037 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
1038 } else {
1039 ichan->chanmode = CHANNEL_A;
1040 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
1041 }
1042
1043 if (conf_is_ht(conf))
1044 ichan->chanmode = ath_get_extchanmode(sc, chan,
1045 conf->channel_type);
1046}
1047
1048/**********************/ 1034/**********************/
1049/* mac80211 callbacks */ 1035/* mac80211 callbacks */
1050/**********************/ 1036/**********************/
1051 1037
1052static int ath9k_start(struct ieee80211_hw *hw) 1038static int ath9k_start(struct ieee80211_hw *hw)
1053{ 1039{
1054 struct ath_wiphy *aphy = hw->priv; 1040 struct ath_softc *sc = hw->priv;
1055 struct ath_softc *sc = aphy->sc;
1056 struct ath_hw *ah = sc->sc_ah; 1041 struct ath_hw *ah = sc->sc_ah;
1057 struct ath_common *common = ath9k_hw_common(ah); 1042 struct ath_common *common = ath9k_hw_common(ah);
1058 struct ieee80211_channel *curchan = hw->conf.channel; 1043 struct ieee80211_channel *curchan = hw->conf.channel;
@@ -1065,32 +1050,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
1065 1050
1066 mutex_lock(&sc->mutex); 1051 mutex_lock(&sc->mutex);
1067 1052
1068 if (ath9k_wiphy_started(sc)) {
1069 if (sc->chan_idx == curchan->hw_value) {
1070 /*
1071 * Already on the operational channel, the new wiphy
1072 * can be marked active.
1073 */
1074 aphy->state = ATH_WIPHY_ACTIVE;
1075 ieee80211_wake_queues(hw);
1076 } else {
1077 /*
1078 * Another wiphy is on another channel, start the new
1079 * wiphy in paused state.
1080 */
1081 aphy->state = ATH_WIPHY_PAUSED;
1082 ieee80211_stop_queues(hw);
1083 }
1084 mutex_unlock(&sc->mutex);
1085 return 0;
1086 }
1087 aphy->state = ATH_WIPHY_ACTIVE;
1088
1089 /* setup initial channel */ 1053 /* setup initial channel */
1090
1091 sc->chan_idx = curchan->hw_value; 1054 sc->chan_idx = curchan->hw_value;
1092 1055
1093 init_channel = ath_get_curchannel(sc, hw); 1056 init_channel = ath9k_cmn_get_curchannel(hw, ah);
1094 1057
1095 /* Reset SERDES registers */ 1058 /* Reset SERDES registers */
1096 ath9k_hw_configpcipowersave(ah, 0, 0); 1059 ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -1116,7 +1079,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
1116 * This is needed only to setup initial state 1079 * This is needed only to setup initial state
1117 * but it's best done after a reset. 1080 * but it's best done after a reset.
1118 */ 1081 */
1119 ath_update_txpow(sc); 1082 ath9k_cmn_update_txpow(ah, sc->curtxpow,
1083 sc->config.txpowlimit, &sc->curtxpow);
1120 1084
1121 /* 1085 /*
1122 * Setup the hardware after reset: 1086 * Setup the hardware after reset:
@@ -1182,22 +1146,13 @@ mutex_unlock:
1182 return r; 1146 return r;
1183} 1147}
1184 1148
1185static int ath9k_tx(struct ieee80211_hw *hw, 1149static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1186 struct sk_buff *skb)
1187{ 1150{
1188 struct ath_wiphy *aphy = hw->priv; 1151 struct ath_softc *sc = hw->priv;
1189 struct ath_softc *sc = aphy->sc;
1190 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1152 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1191 struct ath_tx_control txctl; 1153 struct ath_tx_control txctl;
1192 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1154 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1193 1155
1194 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
1195 ath_dbg(common, ATH_DBG_XMIT,
1196 "ath9k: %s: TX in unexpected wiphy state %d\n",
1197 wiphy_name(hw->wiphy), aphy->state);
1198 goto exit;
1199 }
1200
1201 if (sc->ps_enabled) { 1156 if (sc->ps_enabled) {
1202 /* 1157 /*
1203 * mac80211 does not set PM field for normal data frames, so we 1158 * mac80211 does not set PM field for normal data frames, so we
@@ -1248,52 +1203,30 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1248 goto exit; 1203 goto exit;
1249 } 1204 }
1250 1205
1251 return 0; 1206 return;
1252exit: 1207exit:
1253 dev_kfree_skb_any(skb); 1208 dev_kfree_skb_any(skb);
1254 return 0;
1255} 1209}
1256 1210
1257static void ath9k_stop(struct ieee80211_hw *hw) 1211static void ath9k_stop(struct ieee80211_hw *hw)
1258{ 1212{
1259 struct ath_wiphy *aphy = hw->priv; 1213 struct ath_softc *sc = hw->priv;
1260 struct ath_softc *sc = aphy->sc;
1261 struct ath_hw *ah = sc->sc_ah; 1214 struct ath_hw *ah = sc->sc_ah;
1262 struct ath_common *common = ath9k_hw_common(ah); 1215 struct ath_common *common = ath9k_hw_common(ah);
1263 int i;
1264 1216
1265 mutex_lock(&sc->mutex); 1217 mutex_lock(&sc->mutex);
1266 1218
1267 aphy->state = ATH_WIPHY_INACTIVE;
1268
1269 if (led_blink)
1270 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1271
1272 cancel_delayed_work_sync(&sc->tx_complete_work); 1219 cancel_delayed_work_sync(&sc->tx_complete_work);
1220 cancel_delayed_work_sync(&sc->hw_pll_work);
1273 cancel_work_sync(&sc->paprd_work); 1221 cancel_work_sync(&sc->paprd_work);
1274 cancel_work_sync(&sc->hw_check_work); 1222 cancel_work_sync(&sc->hw_check_work);
1275 1223
1276 for (i = 0; i < sc->num_sec_wiphy; i++) {
1277 if (sc->sec_wiphy[i])
1278 break;
1279 }
1280
1281 if (i == sc->num_sec_wiphy) {
1282 cancel_delayed_work_sync(&sc->wiphy_work);
1283 cancel_work_sync(&sc->chan_work);
1284 }
1285
1286 if (sc->sc_flags & SC_OP_INVALID) { 1224 if (sc->sc_flags & SC_OP_INVALID) {
1287 ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 1225 ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
1288 mutex_unlock(&sc->mutex); 1226 mutex_unlock(&sc->mutex);
1289 return; 1227 return;
1290 } 1228 }
1291 1229
1292 if (ath9k_wiphy_started(sc)) {
1293 mutex_unlock(&sc->mutex);
1294 return; /* another wiphy still in use */
1295 }
1296
1297 /* Ensure HW is awake when we try to shut it down. */ 1230 /* Ensure HW is awake when we try to shut it down. */
1298 ath9k_ps_wakeup(sc); 1231 ath9k_ps_wakeup(sc);
1299 1232
@@ -1319,6 +1252,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1319 } else 1252 } else
1320 sc->rx.rxlink = NULL; 1253 sc->rx.rxlink = NULL;
1321 1254
1255 if (sc->rx.frag) {
1256 dev_kfree_skb_any(sc->rx.frag);
1257 sc->rx.frag = NULL;
1258 }
1259
1322 /* disable HAL and put h/w to sleep */ 1260 /* disable HAL and put h/w to sleep */
1323 ath9k_hw_disable(ah); 1261 ath9k_hw_disable(ah);
1324 ath9k_hw_configpcipowersave(ah, 1, 1); 1262 ath9k_hw_configpcipowersave(ah, 1, 1);
@@ -1334,7 +1272,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1334 ath9k_ps_restore(sc); 1272 ath9k_ps_restore(sc);
1335 1273
1336 sc->ps_idle = true; 1274 sc->ps_idle = true;
1337 ath9k_set_wiphy_idle(aphy, true);
1338 ath_radio_disable(sc, hw); 1275 ath_radio_disable(sc, hw);
1339 1276
1340 sc->sc_flags |= SC_OP_INVALID; 1277 sc->sc_flags |= SC_OP_INVALID;
@@ -1344,112 +1281,225 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1344 ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); 1281 ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
1345} 1282}
1346 1283
1347static int ath9k_add_interface(struct ieee80211_hw *hw, 1284bool ath9k_uses_beacons(int type)
1348 struct ieee80211_vif *vif) 1285{
1286 switch (type) {
1287 case NL80211_IFTYPE_AP:
1288 case NL80211_IFTYPE_ADHOC:
1289 case NL80211_IFTYPE_MESH_POINT:
1290 return true;
1291 default:
1292 return false;
1293 }
1294}
1295
1296static void ath9k_reclaim_beacon(struct ath_softc *sc,
1297 struct ieee80211_vif *vif)
1349{ 1298{
1350 struct ath_wiphy *aphy = hw->priv;
1351 struct ath_softc *sc = aphy->sc;
1352 struct ath_hw *ah = sc->sc_ah;
1353 struct ath_common *common = ath9k_hw_common(ah);
1354 struct ath_vif *avp = (void *)vif->drv_priv; 1299 struct ath_vif *avp = (void *)vif->drv_priv;
1355 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
1356 int ret = 0;
1357 1300
1358 mutex_lock(&sc->mutex); 1301 ath9k_set_beaconing_status(sc, false);
1302 ath_beacon_return(sc, avp);
1303 ath9k_set_beaconing_status(sc, true);
1304 sc->sc_flags &= ~SC_OP_BEACONS;
1305}
1306
1307static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1308{
1309 struct ath9k_vif_iter_data *iter_data = data;
1310 int i;
1311
1312 if (iter_data->hw_macaddr)
1313 for (i = 0; i < ETH_ALEN; i++)
1314 iter_data->mask[i] &=
1315 ~(iter_data->hw_macaddr[i] ^ mac[i]);
1359 1316
1360 switch (vif->type) { 1317 switch (vif->type) {
1361 case NL80211_IFTYPE_STATION: 1318 case NL80211_IFTYPE_AP:
1362 ic_opmode = NL80211_IFTYPE_STATION; 1319 iter_data->naps++;
1363 break; 1320 break;
1364 case NL80211_IFTYPE_WDS: 1321 case NL80211_IFTYPE_STATION:
1365 ic_opmode = NL80211_IFTYPE_WDS; 1322 iter_data->nstations++;
1366 break; 1323 break;
1367 case NL80211_IFTYPE_ADHOC: 1324 case NL80211_IFTYPE_ADHOC:
1368 case NL80211_IFTYPE_AP: 1325 iter_data->nadhocs++;
1326 break;
1369 case NL80211_IFTYPE_MESH_POINT: 1327 case NL80211_IFTYPE_MESH_POINT:
1370 if (sc->nbcnvifs >= ATH_BCBUF) { 1328 iter_data->nmeshes++;
1371 ret = -ENOBUFS; 1329 break;
1372 goto out; 1330 case NL80211_IFTYPE_WDS:
1373 } 1331 iter_data->nwds++;
1374 ic_opmode = vif->type;
1375 break; 1332 break;
1376 default: 1333 default:
1377 ath_err(common, "Interface type %d not yet supported\n", 1334 iter_data->nothers++;
1378 vif->type); 1335 break;
1379 ret = -EOPNOTSUPP;
1380 goto out;
1381 } 1336 }
1337}
1382 1338
1383 ath_dbg(common, ATH_DBG_CONFIG, 1339/* Called with sc->mutex held. */
1384 "Attach a VIF of type: %d\n", ic_opmode); 1340void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
1341 struct ieee80211_vif *vif,
1342 struct ath9k_vif_iter_data *iter_data)
1343{
1344 struct ath_softc *sc = hw->priv;
1345 struct ath_hw *ah = sc->sc_ah;
1346 struct ath_common *common = ath9k_hw_common(ah);
1385 1347
1386 /* Set the VIF opmode */ 1348 /*
1387 avp->av_opmode = ic_opmode; 1349 * Use the hardware MAC address as reference, the hardware uses it
1388 avp->av_bslot = -1; 1350 * together with the BSSID mask when matching addresses.
1351 */
1352 memset(iter_data, 0, sizeof(*iter_data));
1353 iter_data->hw_macaddr = common->macaddr;
1354 memset(&iter_data->mask, 0xff, ETH_ALEN);
1389 1355
1390 sc->nvifs++; 1356 if (vif)
1357 ath9k_vif_iter(iter_data, vif->addr, vif);
1358
1359 /* Get list of all active MAC addresses */
1360 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
1361 iter_data);
1362}
1391 1363
1392 ath9k_set_bssid_mask(hw, vif); 1364/* Called with sc->mutex held. */
1365static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1366 struct ieee80211_vif *vif)
1367{
1368 struct ath_softc *sc = hw->priv;
1369 struct ath_hw *ah = sc->sc_ah;
1370 struct ath_common *common = ath9k_hw_common(ah);
1371 struct ath9k_vif_iter_data iter_data;
1393 1372
1394 if (sc->nvifs > 1) 1373 ath9k_calculate_iter_data(hw, vif, &iter_data);
1395 goto out; /* skip global settings for secondary vif */ 1374
1375 ath9k_ps_wakeup(sc);
1376 /* Set BSSID mask. */
1377 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
1378 ath_hw_setbssidmask(common);
1396 1379
1397 if (ic_opmode == NL80211_IFTYPE_AP) { 1380 /* Set op-mode & TSF */
1381 if (iter_data.naps > 0) {
1398 ath9k_hw_set_tsfadjust(ah, 1); 1382 ath9k_hw_set_tsfadjust(ah, 1);
1399 sc->sc_flags |= SC_OP_TSF_RESET; 1383 sc->sc_flags |= SC_OP_TSF_RESET;
1400 } 1384 ah->opmode = NL80211_IFTYPE_AP;
1385 } else {
1386 ath9k_hw_set_tsfadjust(ah, 0);
1387 sc->sc_flags &= ~SC_OP_TSF_RESET;
1401 1388
1402 /* Set the device opmode */ 1389 if (iter_data.nwds + iter_data.nmeshes)
1403 ah->opmode = ic_opmode; 1390 ah->opmode = NL80211_IFTYPE_AP;
1391 else if (iter_data.nadhocs)
1392 ah->opmode = NL80211_IFTYPE_ADHOC;
1393 else
1394 ah->opmode = NL80211_IFTYPE_STATION;
1395 }
1404 1396
1405 /* 1397 /*
1406 * Enable MIB interrupts when there are hardware phy counters. 1398 * Enable MIB interrupts when there are hardware phy counters.
1407 * Note we only do this (at the moment) for station mode.
1408 */ 1399 */
1409 if ((vif->type == NL80211_IFTYPE_STATION) || 1400 if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
1410 (vif->type == NL80211_IFTYPE_ADHOC) ||
1411 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
1412 if (ah->config.enable_ani) 1401 if (ah->config.enable_ani)
1413 ah->imask |= ATH9K_INT_MIB; 1402 ah->imask |= ATH9K_INT_MIB;
1414 ah->imask |= ATH9K_INT_TSFOOR; 1403 ah->imask |= ATH9K_INT_TSFOOR;
1404 } else {
1405 ah->imask &= ~ATH9K_INT_MIB;
1406 ah->imask &= ~ATH9K_INT_TSFOOR;
1415 } 1407 }
1416 1408
1417 ath9k_hw_set_interrupts(ah, ah->imask); 1409 ath9k_hw_set_interrupts(ah, ah->imask);
1410 ath9k_ps_restore(sc);
1418 1411
1419 if (vif->type == NL80211_IFTYPE_AP || 1412 /* Set up ANI */
1420 vif->type == NL80211_IFTYPE_ADHOC) { 1413 if ((iter_data.naps + iter_data.nadhocs) > 0) {
1421 sc->sc_flags |= SC_OP_ANI_RUN; 1414 sc->sc_flags |= SC_OP_ANI_RUN;
1422 ath_start_ani(common); 1415 ath_start_ani(common);
1416 } else {
1417 sc->sc_flags &= ~SC_OP_ANI_RUN;
1418 del_timer_sync(&common->ani.timer);
1423 } 1419 }
1420}
1424 1421
1425out: 1422/* Called with sc->mutex held, vif counts set up properly. */
1426 mutex_unlock(&sc->mutex); 1423static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
1427 return ret; 1424 struct ieee80211_vif *vif)
1425{
1426 struct ath_softc *sc = hw->priv;
1427
1428 ath9k_calculate_summary_state(hw, vif);
1429
1430 if (ath9k_uses_beacons(vif->type)) {
1431 int error;
1432 /* This may fail because upper levels do not have beacons
1433 * properly configured yet. That's OK, we assume it
1434 * will be properly configured and then we will be notified
1435 * in the info_changed method and set up beacons properly
1436 * there.
1437 */
1438 ath9k_set_beaconing_status(sc, false);
1439 error = ath_beacon_alloc(sc, vif);
1440 if (!error)
1441 ath_beacon_config(sc, vif);
1442 ath9k_set_beaconing_status(sc, true);
1443 }
1428} 1444}
1429 1445
1430static void ath9k_reclaim_beacon(struct ath_softc *sc, 1446
1431 struct ieee80211_vif *vif) 1447static int ath9k_add_interface(struct ieee80211_hw *hw,
1448 struct ieee80211_vif *vif)
1432{ 1449{
1450 struct ath_softc *sc = hw->priv;
1451 struct ath_hw *ah = sc->sc_ah;
1452 struct ath_common *common = ath9k_hw_common(ah);
1433 struct ath_vif *avp = (void *)vif->drv_priv; 1453 struct ath_vif *avp = (void *)vif->drv_priv;
1454 int ret = 0;
1434 1455
1435 /* Disable SWBA interrupt */ 1456 mutex_lock(&sc->mutex);
1436 sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
1437 ath9k_ps_wakeup(sc);
1438 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
1439 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1440 tasklet_kill(&sc->bcon_tasklet);
1441 ath9k_ps_restore(sc);
1442 1457
1443 ath_beacon_return(sc, avp); 1458 switch (vif->type) {
1444 sc->sc_flags &= ~SC_OP_BEACONS; 1459 case NL80211_IFTYPE_STATION:
1460 case NL80211_IFTYPE_WDS:
1461 case NL80211_IFTYPE_ADHOC:
1462 case NL80211_IFTYPE_AP:
1463 case NL80211_IFTYPE_MESH_POINT:
1464 break;
1465 default:
1466 ath_err(common, "Interface type %d not yet supported\n",
1467 vif->type);
1468 ret = -EOPNOTSUPP;
1469 goto out;
1470 }
1445 1471
1446 if (sc->nbcnvifs > 0) { 1472 if (ath9k_uses_beacons(vif->type)) {
1447 /* Re-enable beaconing */ 1473 if (sc->nbcnvifs >= ATH_BCBUF) {
1448 sc->sc_ah->imask |= ATH9K_INT_SWBA; 1474 ath_err(common, "Not enough beacon buffers when adding"
1449 ath9k_ps_wakeup(sc); 1475 " new interface of type: %i\n",
1450 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); 1476 vif->type);
1451 ath9k_ps_restore(sc); 1477 ret = -ENOBUFS;
1478 goto out;
1479 }
1452 } 1480 }
1481
1482 if ((vif->type == NL80211_IFTYPE_ADHOC) &&
1483 sc->nvifs > 0) {
1484 ath_err(common, "Cannot create ADHOC interface when other"
1485 " interfaces already exist.\n");
1486 ret = -EINVAL;
1487 goto out;
1488 }
1489
1490 ath_dbg(common, ATH_DBG_CONFIG,
1491 "Attach a VIF of type: %d\n", vif->type);
1492
1493 /* Set the VIF opmode */
1494 avp->av_opmode = vif->type;
1495 avp->av_bslot = -1;
1496
1497 sc->nvifs++;
1498
1499 ath9k_do_vif_add_setup(hw, vif);
1500out:
1501 mutex_unlock(&sc->mutex);
1502 return ret;
1453} 1503}
1454 1504
1455static int ath9k_change_interface(struct ieee80211_hw *hw, 1505static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1457,40 +1507,40 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1457 enum nl80211_iftype new_type, 1507 enum nl80211_iftype new_type,
1458 bool p2p) 1508 bool p2p)
1459{ 1509{
1460 struct ath_wiphy *aphy = hw->priv; 1510 struct ath_softc *sc = hw->priv;
1461 struct ath_softc *sc = aphy->sc;
1462 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1511 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1463 int ret = 0; 1512 int ret = 0;
1464 1513
1465 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); 1514 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
1466 mutex_lock(&sc->mutex); 1515 mutex_lock(&sc->mutex);
1467 1516
1468 switch (new_type) { 1517 /* See if new interface type is valid. */
1469 case NL80211_IFTYPE_AP: 1518 if ((new_type == NL80211_IFTYPE_ADHOC) &&
1470 case NL80211_IFTYPE_ADHOC: 1519 (sc->nvifs > 1)) {
1520 ath_err(common, "When using ADHOC, it must be the only"
1521 " interface.\n");
1522 ret = -EINVAL;
1523 goto out;
1524 }
1525
1526 if (ath9k_uses_beacons(new_type) &&
1527 !ath9k_uses_beacons(vif->type)) {
1471 if (sc->nbcnvifs >= ATH_BCBUF) { 1528 if (sc->nbcnvifs >= ATH_BCBUF) {
1472 ath_err(common, "No beacon slot available\n"); 1529 ath_err(common, "No beacon slot available\n");
1473 ret = -ENOBUFS; 1530 ret = -ENOBUFS;
1474 goto out; 1531 goto out;
1475 } 1532 }
1476 break;
1477 case NL80211_IFTYPE_STATION:
1478 /* Stop ANI */
1479 sc->sc_flags &= ~SC_OP_ANI_RUN;
1480 del_timer_sync(&common->ani.timer);
1481 if ((vif->type == NL80211_IFTYPE_AP) ||
1482 (vif->type == NL80211_IFTYPE_ADHOC))
1483 ath9k_reclaim_beacon(sc, vif);
1484 break;
1485 default:
1486 ath_err(common, "Interface type %d not yet supported\n",
1487 vif->type);
1488 ret = -ENOTSUPP;
1489 goto out;
1490 } 1533 }
1534
1535 /* Clean up old vif stuff */
1536 if (ath9k_uses_beacons(vif->type))
1537 ath9k_reclaim_beacon(sc, vif);
1538
1539 /* Add new settings */
1491 vif->type = new_type; 1540 vif->type = new_type;
1492 vif->p2p = p2p; 1541 vif->p2p = p2p;
1493 1542
1543 ath9k_do_vif_add_setup(hw, vif);
1494out: 1544out:
1495 mutex_unlock(&sc->mutex); 1545 mutex_unlock(&sc->mutex);
1496 return ret; 1546 return ret;
@@ -1499,25 +1549,20 @@ out:
1499static void ath9k_remove_interface(struct ieee80211_hw *hw, 1549static void ath9k_remove_interface(struct ieee80211_hw *hw,
1500 struct ieee80211_vif *vif) 1550 struct ieee80211_vif *vif)
1501{ 1551{
1502 struct ath_wiphy *aphy = hw->priv; 1552 struct ath_softc *sc = hw->priv;
1503 struct ath_softc *sc = aphy->sc;
1504 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1553 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1505 1554
1506 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1555 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
1507 1556
1508 mutex_lock(&sc->mutex); 1557 mutex_lock(&sc->mutex);
1509 1558
1510 /* Stop ANI */ 1559 sc->nvifs--;
1511 sc->sc_flags &= ~SC_OP_ANI_RUN;
1512 del_timer_sync(&common->ani.timer);
1513 1560
1514 /* Reclaim beacon resources */ 1561 /* Reclaim beacon resources */
1515 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 1562 if (ath9k_uses_beacons(vif->type))
1516 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
1517 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT))
1518 ath9k_reclaim_beacon(sc, vif); 1563 ath9k_reclaim_beacon(sc, vif);
1519 1564
1520 sc->nvifs--; 1565 ath9k_calculate_summary_state(hw, NULL);
1521 1566
1522 mutex_unlock(&sc->mutex); 1567 mutex_unlock(&sc->mutex);
1523} 1568}
@@ -1558,12 +1603,11 @@ static void ath9k_disable_ps(struct ath_softc *sc)
1558 1603
1559static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1604static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1560{ 1605{
1561 struct ath_wiphy *aphy = hw->priv; 1606 struct ath_softc *sc = hw->priv;
1562 struct ath_softc *sc = aphy->sc;
1563 struct ath_hw *ah = sc->sc_ah; 1607 struct ath_hw *ah = sc->sc_ah;
1564 struct ath_common *common = ath9k_hw_common(ah); 1608 struct ath_common *common = ath9k_hw_common(ah);
1565 struct ieee80211_conf *conf = &hw->conf; 1609 struct ieee80211_conf *conf = &hw->conf;
1566 bool disable_radio; 1610 bool disable_radio = false;
1567 1611
1568 mutex_lock(&sc->mutex); 1612 mutex_lock(&sc->mutex);
1569 1613
@@ -1574,29 +1618,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1574 * the end. 1618 * the end.
1575 */ 1619 */
1576 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1620 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1577 bool enable_radio; 1621 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1578 bool all_wiphys_idle; 1622 if (!sc->ps_idle) {
1579 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1580
1581 spin_lock_bh(&sc->wiphy_lock);
1582 all_wiphys_idle = ath9k_all_wiphys_idle(sc);
1583 ath9k_set_wiphy_idle(aphy, idle);
1584
1585 enable_radio = (!idle && all_wiphys_idle);
1586
1587 /*
1588 * After we unlock here its possible another wiphy
1589 * can be re-renabled so to account for that we will
1590 * only disable the radio toward the end of this routine
1591 * if by then all wiphys are still idle.
1592 */
1593 spin_unlock_bh(&sc->wiphy_lock);
1594
1595 if (enable_radio) {
1596 sc->ps_idle = false;
1597 ath_radio_enable(sc, hw); 1623 ath_radio_enable(sc, hw);
1598 ath_dbg(common, ATH_DBG_CONFIG, 1624 ath_dbg(common, ATH_DBG_CONFIG,
1599 "not-idle: enabling radio\n"); 1625 "not-idle: enabling radio\n");
1626 } else {
1627 disable_radio = true;
1600 } 1628 }
1601 } 1629 }
1602 1630
@@ -1637,29 +1665,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1637 if (ah->curchan) 1665 if (ah->curchan)
1638 old_pos = ah->curchan - &ah->channels[0]; 1666 old_pos = ah->curchan - &ah->channels[0];
1639 1667
1640 aphy->chan_idx = pos;
1641 aphy->chan_is_ht = conf_is_ht(conf);
1642 if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) 1668 if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
1643 sc->sc_flags |= SC_OP_OFFCHANNEL; 1669 sc->sc_flags |= SC_OP_OFFCHANNEL;
1644 else 1670 else
1645 sc->sc_flags &= ~SC_OP_OFFCHANNEL; 1671 sc->sc_flags &= ~SC_OP_OFFCHANNEL;
1646 1672
1647 if (aphy->state == ATH_WIPHY_SCAN || 1673 ath_dbg(common, ATH_DBG_CONFIG,
1648 aphy->state == ATH_WIPHY_ACTIVE) 1674 "Set channel: %d MHz type: %d\n",
1649 ath9k_wiphy_pause_all_forced(sc, aphy); 1675 curchan->center_freq, conf->channel_type);
1650 else {
1651 /*
1652 * Do not change operational channel based on a paused
1653 * wiphy changes.
1654 */
1655 goto skip_chan_change;
1656 }
1657
1658 ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
1659 curchan->center_freq);
1660 1676
1661 /* XXX: remove me eventualy */ 1677 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
1662 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]); 1678 curchan, conf->channel_type);
1663 1679
1664 /* update survey stats for the old channel before switching */ 1680 /* update survey stats for the old channel before switching */
1665 spin_lock_irqsave(&common->cc_lock, flags); 1681 spin_lock_irqsave(&common->cc_lock, flags);
@@ -1701,21 +1717,18 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1701 ath_update_survey_nf(sc, old_pos); 1717 ath_update_survey_nf(sc, old_pos);
1702 } 1718 }
1703 1719
1704skip_chan_change:
1705 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1720 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1721 ath_dbg(common, ATH_DBG_CONFIG,
1722 "Set power: %d\n", conf->power_level);
1706 sc->config.txpowlimit = 2 * conf->power_level; 1723 sc->config.txpowlimit = 2 * conf->power_level;
1707 ath9k_ps_wakeup(sc); 1724 ath9k_ps_wakeup(sc);
1708 ath_update_txpow(sc); 1725 ath9k_cmn_update_txpow(ah, sc->curtxpow,
1726 sc->config.txpowlimit, &sc->curtxpow);
1709 ath9k_ps_restore(sc); 1727 ath9k_ps_restore(sc);
1710 } 1728 }
1711 1729
1712 spin_lock_bh(&sc->wiphy_lock);
1713 disable_radio = ath9k_all_wiphys_idle(sc);
1714 spin_unlock_bh(&sc->wiphy_lock);
1715
1716 if (disable_radio) { 1730 if (disable_radio) {
1717 ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); 1731 ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
1718 sc->ps_idle = true;
1719 ath_radio_disable(sc, hw); 1732 ath_radio_disable(sc, hw);
1720 } 1733 }
1721 1734
@@ -1740,8 +1753,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
1740 unsigned int *total_flags, 1753 unsigned int *total_flags,
1741 u64 multicast) 1754 u64 multicast)
1742{ 1755{
1743 struct ath_wiphy *aphy = hw->priv; 1756 struct ath_softc *sc = hw->priv;
1744 struct ath_softc *sc = aphy->sc;
1745 u32 rfilt; 1757 u32 rfilt;
1746 1758
1747 changed_flags &= SUPPORTED_FILTERS; 1759 changed_flags &= SUPPORTED_FILTERS;
@@ -1761,8 +1773,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1761 struct ieee80211_vif *vif, 1773 struct ieee80211_vif *vif,
1762 struct ieee80211_sta *sta) 1774 struct ieee80211_sta *sta)
1763{ 1775{
1764 struct ath_wiphy *aphy = hw->priv; 1776 struct ath_softc *sc = hw->priv;
1765 struct ath_softc *sc = aphy->sc;
1766 1777
1767 ath_node_attach(sc, sta); 1778 ath_node_attach(sc, sta);
1768 1779
@@ -1773,8 +1784,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
1773 struct ieee80211_vif *vif, 1784 struct ieee80211_vif *vif,
1774 struct ieee80211_sta *sta) 1785 struct ieee80211_sta *sta)
1775{ 1786{
1776 struct ath_wiphy *aphy = hw->priv; 1787 struct ath_softc *sc = hw->priv;
1777 struct ath_softc *sc = aphy->sc;
1778 1788
1779 ath_node_detach(sc, sta); 1789 ath_node_detach(sc, sta);
1780 1790
@@ -1784,8 +1794,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
1784static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, 1794static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1785 const struct ieee80211_tx_queue_params *params) 1795 const struct ieee80211_tx_queue_params *params)
1786{ 1796{
1787 struct ath_wiphy *aphy = hw->priv; 1797 struct ath_softc *sc = hw->priv;
1788 struct ath_softc *sc = aphy->sc;
1789 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1798 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1790 struct ath_txq *txq; 1799 struct ath_txq *txq;
1791 struct ath9k_tx_queue_info qi; 1800 struct ath9k_tx_queue_info qi;
@@ -1829,8 +1838,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1829 struct ieee80211_sta *sta, 1838 struct ieee80211_sta *sta,
1830 struct ieee80211_key_conf *key) 1839 struct ieee80211_key_conf *key)
1831{ 1840{
1832 struct ath_wiphy *aphy = hw->priv; 1841 struct ath_softc *sc = hw->priv;
1833 struct ath_softc *sc = aphy->sc;
1834 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1842 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1835 int ret = 0; 1843 int ret = 0;
1836 1844
@@ -1874,8 +1882,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1874 struct ieee80211_bss_conf *bss_conf, 1882 struct ieee80211_bss_conf *bss_conf,
1875 u32 changed) 1883 u32 changed)
1876{ 1884{
1877 struct ath_wiphy *aphy = hw->priv; 1885 struct ath_softc *sc = hw->priv;
1878 struct ath_softc *sc = aphy->sc; 1886 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
1879 struct ath_hw *ah = sc->sc_ah; 1887 struct ath_hw *ah = sc->sc_ah;
1880 struct ath_common *common = ath9k_hw_common(ah); 1888 struct ath_common *common = ath9k_hw_common(ah);
1881 struct ath_vif *avp = (void *)vif->drv_priv; 1889 struct ath_vif *avp = (void *)vif->drv_priv;
@@ -1904,10 +1912,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1904 /* Enable transmission of beacons (AP, IBSS, MESH) */ 1912 /* Enable transmission of beacons (AP, IBSS, MESH) */
1905 if ((changed & BSS_CHANGED_BEACON) || 1913 if ((changed & BSS_CHANGED_BEACON) ||
1906 ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) { 1914 ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
1907 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1915 ath9k_set_beaconing_status(sc, false);
1908 error = ath_beacon_alloc(aphy, vif); 1916 error = ath_beacon_alloc(sc, vif);
1909 if (!error) 1917 if (!error)
1910 ath_beacon_config(sc, vif); 1918 ath_beacon_config(sc, vif);
1919 ath9k_set_beaconing_status(sc, true);
1911 } 1920 }
1912 1921
1913 if (changed & BSS_CHANGED_ERP_SLOT) { 1922 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1930,21 +1939,26 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1930 } 1939 }
1931 1940
1932 /* Disable transmission of beacons */ 1941 /* Disable transmission of beacons */
1933 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) 1942 if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
1934 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1943 !bss_conf->enable_beacon) {
1944 ath9k_set_beaconing_status(sc, false);
1945 avp->is_bslot_active = false;
1946 ath9k_set_beaconing_status(sc, true);
1947 }
1935 1948
1936 if (changed & BSS_CHANGED_BEACON_INT) { 1949 if (changed & BSS_CHANGED_BEACON_INT) {
1937 sc->beacon_interval = bss_conf->beacon_int; 1950 cur_conf->beacon_interval = bss_conf->beacon_int;
1938 /* 1951 /*
1939 * In case of AP mode, the HW TSF has to be reset 1952 * In case of AP mode, the HW TSF has to be reset
1940 * when the beacon interval changes. 1953 * when the beacon interval changes.
1941 */ 1954 */
1942 if (vif->type == NL80211_IFTYPE_AP) { 1955 if (vif->type == NL80211_IFTYPE_AP) {
1943 sc->sc_flags |= SC_OP_TSF_RESET; 1956 sc->sc_flags |= SC_OP_TSF_RESET;
1944 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1957 ath9k_set_beaconing_status(sc, false);
1945 error = ath_beacon_alloc(aphy, vif); 1958 error = ath_beacon_alloc(sc, vif);
1946 if (!error) 1959 if (!error)
1947 ath_beacon_config(sc, vif); 1960 ath_beacon_config(sc, vif);
1961 ath9k_set_beaconing_status(sc, true);
1948 } else { 1962 } else {
1949 ath_beacon_config(sc, vif); 1963 ath_beacon_config(sc, vif);
1950 } 1964 }
@@ -1980,9 +1994,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1980 1994
1981static u64 ath9k_get_tsf(struct ieee80211_hw *hw) 1995static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
1982{ 1996{
1997 struct ath_softc *sc = hw->priv;
1983 u64 tsf; 1998 u64 tsf;
1984 struct ath_wiphy *aphy = hw->priv;
1985 struct ath_softc *sc = aphy->sc;
1986 1999
1987 mutex_lock(&sc->mutex); 2000 mutex_lock(&sc->mutex);
1988 ath9k_ps_wakeup(sc); 2001 ath9k_ps_wakeup(sc);
@@ -1995,8 +2008,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
1995 2008
1996static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf) 2009static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
1997{ 2010{
1998 struct ath_wiphy *aphy = hw->priv; 2011 struct ath_softc *sc = hw->priv;
1999 struct ath_softc *sc = aphy->sc;
2000 2012
2001 mutex_lock(&sc->mutex); 2013 mutex_lock(&sc->mutex);
2002 ath9k_ps_wakeup(sc); 2014 ath9k_ps_wakeup(sc);
@@ -2007,8 +2019,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2007 2019
2008static void ath9k_reset_tsf(struct ieee80211_hw *hw) 2020static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2009{ 2021{
2010 struct ath_wiphy *aphy = hw->priv; 2022 struct ath_softc *sc = hw->priv;
2011 struct ath_softc *sc = aphy->sc;
2012 2023
2013 mutex_lock(&sc->mutex); 2024 mutex_lock(&sc->mutex);
2014 2025
@@ -2023,10 +2034,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2023 struct ieee80211_vif *vif, 2034 struct ieee80211_vif *vif,
2024 enum ieee80211_ampdu_mlme_action action, 2035 enum ieee80211_ampdu_mlme_action action,
2025 struct ieee80211_sta *sta, 2036 struct ieee80211_sta *sta,
2026 u16 tid, u16 *ssn) 2037 u16 tid, u16 *ssn, u8 buf_size)
2027{ 2038{
2028 struct ath_wiphy *aphy = hw->priv; 2039 struct ath_softc *sc = hw->priv;
2029 struct ath_softc *sc = aphy->sc;
2030 int ret = 0; 2040 int ret = 0;
2031 2041
2032 local_bh_disable(); 2042 local_bh_disable();
@@ -2071,8 +2081,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2071static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, 2081static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
2072 struct survey_info *survey) 2082 struct survey_info *survey)
2073{ 2083{
2074 struct ath_wiphy *aphy = hw->priv; 2084 struct ath_softc *sc = hw->priv;
2075 struct ath_softc *sc = aphy->sc;
2076 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2085 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2077 struct ieee80211_supported_band *sband; 2086 struct ieee80211_supported_band *sband;
2078 struct ieee80211_channel *chan; 2087 struct ieee80211_channel *chan;
@@ -2106,53 +2115,55 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
2106 return 0; 2115 return 0;
2107} 2116}
2108 2117
2109static void ath9k_sw_scan_start(struct ieee80211_hw *hw) 2118static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2110{ 2119{
2111 struct ath_wiphy *aphy = hw->priv; 2120 struct ath_softc *sc = hw->priv;
2112 struct ath_softc *sc = aphy->sc; 2121 struct ath_hw *ah = sc->sc_ah;
2113 2122
2114 mutex_lock(&sc->mutex); 2123 mutex_lock(&sc->mutex);
2115 if (ath9k_wiphy_scanning(sc)) { 2124 ah->coverage_class = coverage_class;
2116 /* 2125 ath9k_hw_init_global_settings(ah);
2117 * There is a race here in mac80211 but fixing it requires
2118 * we revisit how we handle the scan complete callback.
2119 * After mac80211 fixes we will not have configured hardware
2120 * to the home channel nor would we have configured the RX
2121 * filter yet.
2122 */
2123 mutex_unlock(&sc->mutex);
2124 return;
2125 }
2126
2127 aphy->state = ATH_WIPHY_SCAN;
2128 ath9k_wiphy_pause_all_forced(sc, aphy);
2129 mutex_unlock(&sc->mutex); 2126 mutex_unlock(&sc->mutex);
2130} 2127}
2131 2128
2132/* 2129static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2133 * XXX: this requires a revisit after the driver
2134 * scan_complete gets moved to another place/removed in mac80211.
2135 */
2136static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2137{ 2130{
2138 struct ath_wiphy *aphy = hw->priv; 2131 struct ath_softc *sc = hw->priv;
2139 struct ath_softc *sc = aphy->sc; 2132 int timeout = 200; /* ms */
2133 int i, j;
2140 2134
2135 ath9k_ps_wakeup(sc);
2141 mutex_lock(&sc->mutex); 2136 mutex_lock(&sc->mutex);
2142 aphy->state = ATH_WIPHY_ACTIVE;
2143 mutex_unlock(&sc->mutex);
2144}
2145 2137
2146static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) 2138 cancel_delayed_work_sync(&sc->tx_complete_work);
2147{
2148 struct ath_wiphy *aphy = hw->priv;
2149 struct ath_softc *sc = aphy->sc;
2150 struct ath_hw *ah = sc->sc_ah;
2151 2139
2152 mutex_lock(&sc->mutex); 2140 if (drop)
2153 ah->coverage_class = coverage_class; 2141 timeout = 1;
2154 ath9k_hw_init_global_settings(ah); 2142
2143 for (j = 0; j < timeout; j++) {
2144 int npend = 0;
2145
2146 if (j)
2147 usleep_range(1000, 2000);
2148
2149 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2150 if (!ATH_TXQ_SETUP(sc, i))
2151 continue;
2152
2153 npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
2154 }
2155
2156 if (!npend)
2157 goto out;
2158 }
2159
2160 if (!ath_drain_all_txq(sc, false))
2161 ath_reset(sc, false);
2162
2163out:
2164 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
2155 mutex_unlock(&sc->mutex); 2165 mutex_unlock(&sc->mutex);
2166 ath9k_ps_restore(sc);
2156} 2167}
2157 2168
2158struct ieee80211_ops ath9k_ops = { 2169struct ieee80211_ops ath9k_ops = {
@@ -2174,8 +2185,7 @@ struct ieee80211_ops ath9k_ops = {
2174 .reset_tsf = ath9k_reset_tsf, 2185 .reset_tsf = ath9k_reset_tsf,
2175 .ampdu_action = ath9k_ampdu_action, 2186 .ampdu_action = ath9k_ampdu_action,
2176 .get_survey = ath9k_get_survey, 2187 .get_survey = ath9k_get_survey,
2177 .sw_scan_start = ath9k_sw_scan_start,
2178 .sw_scan_complete = ath9k_sw_scan_complete,
2179 .rfkill_poll = ath9k_rfkill_poll_state, 2188 .rfkill_poll = ath9k_rfkill_poll_state,
2180 .set_coverage_class = ath9k_set_coverage_class, 2189 .set_coverage_class = ath9k_set_coverage_class,
2190 .flush = ath9k_flush,
2181}; 2191};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 78ef1f13386f..e83128c50f7b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -126,7 +126,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
126static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 126static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
127{ 127{
128 void __iomem *mem; 128 void __iomem *mem;
129 struct ath_wiphy *aphy;
130 struct ath_softc *sc; 129 struct ath_softc *sc;
131 struct ieee80211_hw *hw; 130 struct ieee80211_hw *hw;
132 u8 csz; 131 u8 csz;
@@ -198,8 +197,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
198 goto err_iomap; 197 goto err_iomap;
199 } 198 }
200 199
201 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + 200 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
202 sizeof(struct ath_softc), &ath9k_ops);
203 if (!hw) { 201 if (!hw) {
204 dev_err(&pdev->dev, "No memory for ieee80211_hw\n"); 202 dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
205 ret = -ENOMEM; 203 ret = -ENOMEM;
@@ -209,11 +207,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
209 SET_IEEE80211_DEV(hw, &pdev->dev); 207 SET_IEEE80211_DEV(hw, &pdev->dev);
210 pci_set_drvdata(pdev, hw); 208 pci_set_drvdata(pdev, hw);
211 209
212 aphy = hw->priv; 210 sc = hw->priv;
213 sc = (struct ath_softc *) (aphy + 1);
214 aphy->sc = sc;
215 aphy->hw = hw;
216 sc->pri_wiphy = aphy;
217 sc->hw = hw; 211 sc->hw = hw;
218 sc->dev = &pdev->dev; 212 sc->dev = &pdev->dev;
219 sc->mem = mem; 213 sc->mem = mem;
@@ -260,8 +254,7 @@ err_dma:
260static void ath_pci_remove(struct pci_dev *pdev) 254static void ath_pci_remove(struct pci_dev *pdev)
261{ 255{
262 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 256 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
263 struct ath_wiphy *aphy = hw->priv; 257 struct ath_softc *sc = hw->priv;
264 struct ath_softc *sc = aphy->sc;
265 void __iomem *mem = sc->mem; 258 void __iomem *mem = sc->mem;
266 259
267 if (!is_ath9k_unloaded) 260 if (!is_ath9k_unloaded)
@@ -281,8 +274,7 @@ static int ath_pci_suspend(struct device *device)
281{ 274{
282 struct pci_dev *pdev = to_pci_dev(device); 275 struct pci_dev *pdev = to_pci_dev(device);
283 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 276 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
284 struct ath_wiphy *aphy = hw->priv; 277 struct ath_softc *sc = hw->priv;
285 struct ath_softc *sc = aphy->sc;
286 278
287 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 279 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
288 280
@@ -293,8 +285,7 @@ static int ath_pci_resume(struct device *device)
293{ 285{
294 struct pci_dev *pdev = to_pci_dev(device); 286 struct pci_dev *pdev = to_pci_dev(device);
295 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 287 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
296 struct ath_wiphy *aphy = hw->priv; 288 struct ath_softc *sc = hw->priv;
297 struct ath_softc *sc = aphy->sc;
298 u32 val; 289 u32 val;
299 290
300 /* 291 /*
@@ -320,7 +311,6 @@ static int ath_pci_resume(struct device *device)
320 ath9k_ps_restore(sc); 311 ath9k_ps_restore(sc);
321 312
322 sc->ps_idle = true; 313 sc->ps_idle = true;
323 ath9k_set_wiphy_idle(aphy, true);
324 ath_radio_disable(sc, hw); 314 ath_radio_disable(sc, hw);
325 315
326 return 0; 316 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e45147820eae..960d717ca7c2 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1560,8 +1560,7 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
1560 1560
1561static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 1561static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
1562{ 1562{
1563 struct ath_wiphy *aphy = hw->priv; 1563 return hw->priv;
1564 return aphy->sc;
1565} 1564}
1566 1565
1567static void ath_rate_free(void *priv) 1566static void ath_rate_free(void *priv)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index b2497b8601e5..a9c3f4672aa0 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,27 +34,6 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
34 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); 34 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
35} 35}
36 36
37static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
38 struct ieee80211_hdr *hdr)
39{
40 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
41 int i;
42
43 spin_lock_bh(&sc->wiphy_lock);
44 for (i = 0; i < sc->num_sec_wiphy; i++) {
45 struct ath_wiphy *aphy = sc->sec_wiphy[i];
46 if (aphy == NULL)
47 continue;
48 if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
49 == 0) {
50 hw = aphy->hw;
51 break;
52 }
53 }
54 spin_unlock_bh(&sc->wiphy_lock);
55 return hw;
56}
57
58/* 37/*
59 * Setup and link descriptors. 38 * Setup and link descriptors.
60 * 39 *
@@ -230,11 +209,6 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
230 int error = 0, i; 209 int error = 0, i;
231 u32 size; 210 u32 size;
232 211
233
234 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
235 ah->caps.rx_status_len,
236 min(common->cachelsz, (u16)64));
237
238 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - 212 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
239 ah->caps.rx_status_len); 213 ah->caps.rx_status_len);
240 214
@@ -321,12 +295,12 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
321 sc->sc_flags &= ~SC_OP_RXFLUSH; 295 sc->sc_flags &= ~SC_OP_RXFLUSH;
322 spin_lock_init(&sc->rx.rxbuflock); 296 spin_lock_init(&sc->rx.rxbuflock);
323 297
298 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
299 sc->sc_ah->caps.rx_status_len;
300
324 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 301 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
325 return ath_rx_edma_init(sc, nbufs); 302 return ath_rx_edma_init(sc, nbufs);
326 } else { 303 } else {
327 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
328 min(common->cachelsz, (u16)64));
329
330 ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 304 ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
331 common->cachelsz, common->rx_bufsize); 305 common->cachelsz, common->rx_bufsize);
332 306
@@ -439,9 +413,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
439 * mode interface or when in monitor mode. AP mode does not need this 413 * mode interface or when in monitor mode. AP mode does not need this
440 * since it receives all in-BSS frames anyway. 414 * since it receives all in-BSS frames anyway.
441 */ 415 */
442 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && 416 if (sc->sc_ah->is_monitoring)
443 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
444 (sc->sc_ah->is_monitoring))
445 rfilt |= ATH9K_RX_FILTER_PROM; 417 rfilt |= ATH9K_RX_FILTER_PROM;
446 418
447 if (sc->rx.rxfilter & FIF_CONTROL) 419 if (sc->rx.rxfilter & FIF_CONTROL)
@@ -463,8 +435,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
463 if (conf_is_ht(&sc->hw->conf)) 435 if (conf_is_ht(&sc->hw->conf))
464 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 436 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
465 437
466 if (sc->sec_wiphy || (sc->nvifs > 1) || 438 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
467 (sc->rx.rxfilter & FIF_OTHER_BSS)) {
468 /* The following may also be needed for other older chips */ 439 /* The following may also be needed for other older chips */
469 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 440 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
470 rfilt |= ATH9K_RX_FILTER_PROM; 441 rfilt |= ATH9K_RX_FILTER_PROM;
@@ -588,8 +559,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
588 return; 559 return;
589 560
590 mgmt = (struct ieee80211_mgmt *)skb->data; 561 mgmt = (struct ieee80211_mgmt *)skb->data;
591 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) 562 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
563 /* TODO: This doesn't work well if you have stations
564 * associated to two different APs because curbssid
565 * is just the last AP that any of the stations associated
566 * with.
567 */
592 return; /* not from our current AP */ 568 return; /* not from our current AP */
569 }
593 570
594 sc->ps_flags &= ~PS_WAIT_FOR_BEACON; 571 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
595 572
@@ -662,37 +639,6 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
662 } 639 }
663} 640}
664 641
665static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
666 struct ath_softc *sc, struct sk_buff *skb)
667{
668 struct ieee80211_hdr *hdr;
669
670 hdr = (struct ieee80211_hdr *)skb->data;
671
672 /* Send the frame to mac80211 */
673 if (is_multicast_ether_addr(hdr->addr1)) {
674 int i;
675 /*
676 * Deliver broadcast/multicast frames to all suitable
677 * virtual wiphys.
678 */
679 /* TODO: filter based on channel configuration */
680 for (i = 0; i < sc->num_sec_wiphy; i++) {
681 struct ath_wiphy *aphy = sc->sec_wiphy[i];
682 struct sk_buff *nskb;
683 if (aphy == NULL)
684 continue;
685 nskb = skb_copy(skb, GFP_ATOMIC);
686 if (!nskb)
687 continue;
688 ieee80211_rx(aphy->hw, nskb);
689 }
690 ieee80211_rx(sc->hw, skb);
691 } else
692 /* Deliver unicast frames based on receiver address */
693 ieee80211_rx(hw, skb);
694}
695
696static bool ath_edma_get_buffers(struct ath_softc *sc, 642static bool ath_edma_get_buffers(struct ath_softc *sc,
697 enum ath9k_rx_qtype qtype) 643 enum ath9k_rx_qtype qtype)
698{ 644{
@@ -862,15 +808,9 @@ static bool ath9k_rx_accept(struct ath_common *common,
862 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) 808 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
863 return false; 809 return false;
864 810
865 /* 811 /* Only use error bits from the last fragment */
866 * rs_more indicates chained descriptors which can be used
867 * to link buffers together for a sort of scatter-gather
868 * operation.
869 * reject the frame, we don't support scatter-gather yet and
870 * the frame is probably corrupt anyway
871 */
872 if (rx_stats->rs_more) 812 if (rx_stats->rs_more)
873 return false; 813 return true;
874 814
875 /* 815 /*
876 * The rx_stats->rs_status will not be set until the end of the 816 * The rx_stats->rs_status will not be set until the end of the
@@ -974,7 +914,7 @@ static void ath9k_process_rssi(struct ath_common *common,
974 struct ieee80211_hdr *hdr, 914 struct ieee80211_hdr *hdr,
975 struct ath_rx_status *rx_stats) 915 struct ath_rx_status *rx_stats)
976{ 916{
977 struct ath_wiphy *aphy = hw->priv; 917 struct ath_softc *sc = hw->priv;
978 struct ath_hw *ah = common->ah; 918 struct ath_hw *ah = common->ah;
979 int last_rssi; 919 int last_rssi;
980 __le16 fc; 920 __le16 fc;
@@ -984,13 +924,19 @@ static void ath9k_process_rssi(struct ath_common *common,
984 924
985 fc = hdr->frame_control; 925 fc = hdr->frame_control;
986 if (!ieee80211_is_beacon(fc) || 926 if (!ieee80211_is_beacon(fc) ||
987 compare_ether_addr(hdr->addr3, common->curbssid)) 927 compare_ether_addr(hdr->addr3, common->curbssid)) {
928 /* TODO: This doesn't work well if you have stations
929 * associated to two different APs because curbssid
930 * is just the last AP that any of the stations associated
931 * with.
932 */
988 return; 933 return;
934 }
989 935
990 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) 936 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
991 ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi); 937 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
992 938
993 last_rssi = aphy->last_rssi; 939 last_rssi = sc->last_rssi;
994 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 940 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
995 rx_stats->rs_rssi = ATH_EP_RND(last_rssi, 941 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
996 ATH_RSSI_EP_MULTIPLIER); 942 ATH_RSSI_EP_MULTIPLIER);
@@ -1022,6 +968,10 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
1022 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 968 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
1023 return -EINVAL; 969 return -EINVAL;
1024 970
971 /* Only use status info from the last fragment */
972 if (rx_stats->rs_more)
973 return 0;
974
1025 ath9k_process_rssi(common, hw, hdr, rx_stats); 975 ath9k_process_rssi(common, hw, hdr, rx_stats);
1026 976
1027 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 977 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
@@ -1031,7 +981,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
1031 rx_status->freq = hw->conf.channel->center_freq; 981 rx_status->freq = hw->conf.channel->center_freq;
1032 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; 982 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
1033 rx_status->antenna = rx_stats->rs_antenna; 983 rx_status->antenna = rx_stats->rs_antenna;
1034 rx_status->flag |= RX_FLAG_TSFT; 984 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
1035 985
1036 return 0; 986 return 0;
1037} 987}
@@ -1623,7 +1573,7 @@ div_comb_done:
1623int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1573int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1624{ 1574{
1625 struct ath_buf *bf; 1575 struct ath_buf *bf;
1626 struct sk_buff *skb = NULL, *requeue_skb; 1576 struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
1627 struct ieee80211_rx_status *rxs; 1577 struct ieee80211_rx_status *rxs;
1628 struct ath_hw *ah = sc->sc_ah; 1578 struct ath_hw *ah = sc->sc_ah;
1629 struct ath_common *common = ath9k_hw_common(ah); 1579 struct ath_common *common = ath9k_hw_common(ah);
@@ -1632,7 +1582,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1632 * virtual wiphy so to account for that we iterate over the active 1582 * virtual wiphy so to account for that we iterate over the active
1633 * wiphys and find the appropriate wiphy and therefore hw. 1583 * wiphys and find the appropriate wiphy and therefore hw.
1634 */ 1584 */
1635 struct ieee80211_hw *hw = NULL; 1585 struct ieee80211_hw *hw = sc->hw;
1636 struct ieee80211_hdr *hdr; 1586 struct ieee80211_hdr *hdr;
1637 int retval; 1587 int retval;
1638 bool decrypt_error = false; 1588 bool decrypt_error = false;
@@ -1674,10 +1624,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1674 if (!skb) 1624 if (!skb)
1675 continue; 1625 continue;
1676 1626
1677 hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len); 1627 /*
1678 rxs = IEEE80211_SKB_RXCB(skb); 1628 * Take frame header from the first fragment and RX status from
1629 * the last one.
1630 */
1631 if (sc->rx.frag)
1632 hdr_skb = sc->rx.frag;
1633 else
1634 hdr_skb = skb;
1679 1635
1680 hw = ath_get_virt_hw(sc, hdr); 1636 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1637 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1681 1638
1682 ath_debug_stat_rx(sc, &rs); 1639 ath_debug_stat_rx(sc, &rs);
1683 1640
@@ -1686,12 +1643,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1686 * chain it back at the queue without processing it. 1643 * chain it back at the queue without processing it.
1687 */ 1644 */
1688 if (flush) 1645 if (flush)
1689 goto requeue; 1646 goto requeue_drop_frag;
1690 1647
1691 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1648 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1692 rxs, &decrypt_error); 1649 rxs, &decrypt_error);
1693 if (retval) 1650 if (retval)
1694 goto requeue; 1651 goto requeue_drop_frag;
1695 1652
1696 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1653 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1697 if (rs.rs_tstamp > tsf_lower && 1654 if (rs.rs_tstamp > tsf_lower &&
@@ -1711,7 +1668,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1711 * skb and put it at the tail of the sc->rx.rxbuf list for 1668 * skb and put it at the tail of the sc->rx.rxbuf list for
1712 * processing. */ 1669 * processing. */
1713 if (!requeue_skb) 1670 if (!requeue_skb)
1714 goto requeue; 1671 goto requeue_drop_frag;
1715 1672
1716 /* Unmap the frame */ 1673 /* Unmap the frame */
1717 dma_unmap_single(sc->dev, bf->bf_buf_addr, 1674 dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1722,8 +1679,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1722 if (ah->caps.rx_status_len) 1679 if (ah->caps.rx_status_len)
1723 skb_pull(skb, ah->caps.rx_status_len); 1680 skb_pull(skb, ah->caps.rx_status_len);
1724 1681
1725 ath9k_rx_skb_postprocess(common, skb, &rs, 1682 if (!rs.rs_more)
1726 rxs, decrypt_error); 1683 ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1684 rxs, decrypt_error);
1727 1685
1728 /* We will now give hardware our shiny new allocated skb */ 1686 /* We will now give hardware our shiny new allocated skb */
1729 bf->bf_mpdu = requeue_skb; 1687 bf->bf_mpdu = requeue_skb;
@@ -1736,10 +1694,42 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1736 bf->bf_mpdu = NULL; 1694 bf->bf_mpdu = NULL;
1737 bf->bf_buf_addr = 0; 1695 bf->bf_buf_addr = 0;
1738 ath_err(common, "dma_mapping_error() on RX\n"); 1696 ath_err(common, "dma_mapping_error() on RX\n");
1739 ath_rx_send_to_mac80211(hw, sc, skb); 1697 ieee80211_rx(hw, skb);
1740 break; 1698 break;
1741 } 1699 }
1742 1700
1701 if (rs.rs_more) {
1702 /*
1703 * rs_more indicates chained descriptors which can be
1704 * used to link buffers together for a sort of
1705 * scatter-gather operation.
1706 */
1707 if (sc->rx.frag) {
1708 /* too many fragments - cannot handle frame */
1709 dev_kfree_skb_any(sc->rx.frag);
1710 dev_kfree_skb_any(skb);
1711 skb = NULL;
1712 }
1713 sc->rx.frag = skb;
1714 goto requeue;
1715 }
1716
1717 if (sc->rx.frag) {
1718 int space = skb->len - skb_tailroom(hdr_skb);
1719
1720 sc->rx.frag = NULL;
1721
1722 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1723 dev_kfree_skb(skb);
1724 goto requeue_drop_frag;
1725 }
1726
1727 skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
1728 skb->len);
1729 dev_kfree_skb_any(skb);
1730 skb = hdr_skb;
1731 }
1732
1743 /* 1733 /*
1744 * change the default rx antenna if rx diversity chooses the 1734 * change the default rx antenna if rx diversity chooses the
1745 * other antenna 3 times in a row. 1735 * other antenna 3 times in a row.
@@ -1763,8 +1753,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1763 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) 1753 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
1764 ath_ant_comb_scan(sc, &rs); 1754 ath_ant_comb_scan(sc, &rs);
1765 1755
1766 ath_rx_send_to_mac80211(hw, sc, skb); 1756 ieee80211_rx(hw, skb);
1767 1757
1758requeue_drop_frag:
1759 if (sc->rx.frag) {
1760 dev_kfree_skb_any(sc->rx.frag);
1761 sc->rx.frag = NULL;
1762 }
1768requeue: 1763requeue:
1769 if (edma) { 1764 if (edma) {
1770 list_add_tail(&bf->list, &sc->rx.rxbuf); 1765 list_add_tail(&bf->list, &sc->rx.rxbuf);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 4df5659c6c16..8fa8acfde62e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -789,6 +789,7 @@
789#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */ 789#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
790#define AR_SREV_VERSION_9485 0x240 790#define AR_SREV_VERSION_9485 0x240
791#define AR_SREV_REVISION_9485_10 0 791#define AR_SREV_REVISION_9485_10 0
792#define AR_SREV_REVISION_9485_11 1
792 793
793#define AR_SREV_5416(_ah) \ 794#define AR_SREV_5416(_ah) \
794 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 795 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -866,6 +867,9 @@
866#define AR_SREV_9485_10(_ah) \ 867#define AR_SREV_9485_10(_ah) \
867 (AR_SREV_9485(_ah) && \ 868 (AR_SREV_9485(_ah) && \
868 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10)) 869 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
870#define AR_SREV_9485_11(_ah) \
871 (AR_SREV_9485(_ah) && \
872 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
869 873
870#define AR_SREV_9285E_20(_ah) \ 874#define AR_SREV_9285E_20(_ah) \
871 (AR_SREV_9285_12_OR_LATER(_ah) && \ 875 (AR_SREV_9285_12_OR_LATER(_ah) && \
@@ -874,6 +878,7 @@
874enum ath_usb_dev { 878enum ath_usb_dev {
875 AR9280_USB = 1, /* AR7010 + AR9280, UB94 */ 879 AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
876 AR9287_USB = 2, /* AR7010 + AR9287, UB95 */ 880 AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
881 STORAGE_DEVICE = 3,
877}; 882};
878 883
879#define AR_DEVID_7010(_ah) \ 884#define AR_DEVID_7010(_ah) \
@@ -1083,6 +1088,17 @@ enum {
1083#define AR_ENT_OTP 0x40d8 1088#define AR_ENT_OTP 0x40d8
1084#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000 1089#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
1085#define AR_ENT_OTP_MPSD 0x00800000 1090#define AR_ENT_OTP_MPSD 0x00800000
1091#define AR_CH0_BB_DPLL2 0x16184
1092#define AR_CH0_BB_DPLL3 0x16188
1093#define AR_CH0_DDR_DPLL2 0x16244
1094#define AR_CH0_DDR_DPLL3 0x16248
1095#define AR_CH0_DPLL2_KD 0x03F80000
1096#define AR_CH0_DPLL2_KD_S 19
1097#define AR_CH0_DPLL2_KI 0x3C000000
1098#define AR_CH0_DPLL2_KI_S 26
1099#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000
1100#define AR_CH0_DPLL3_PHASE_SHIFT_S 23
1101#define AR_PHY_CCA_NOM_VAL_2GHZ -118
1086 1102
1087#define AR_RTC_9300_PLL_DIV 0x000003ff 1103#define AR_RTC_9300_PLL_DIV 0x000003ff
1088#define AR_RTC_9300_PLL_DIV_S 0 1104#define AR_RTC_9300_PLL_DIV_S 0
@@ -1129,6 +1145,12 @@ enum {
1129#define AR_RTC_PLL_CLKSEL 0x00000300 1145#define AR_RTC_PLL_CLKSEL 0x00000300
1130#define AR_RTC_PLL_CLKSEL_S 8 1146#define AR_RTC_PLL_CLKSEL_S 8
1131 1147
1148#define PLL3 0x16188
1149#define PLL3_DO_MEAS_MASK 0x40000000
1150#define PLL4 0x1618c
1151#define PLL4_MEAS_DONE 0x8
1152#define SQSUM_DVC_MASK 0x007ffff8
1153
1132#define AR_RTC_RESET \ 1154#define AR_RTC_RESET \
1133 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040) 1155 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
1134#define AR_RTC_RESET_EN (0x00000001) 1156#define AR_RTC_RESET_EN (0x00000001)
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
deleted file mode 100644
index 2dc7095e56d1..000000000000
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ /dev/null
@@ -1,717 +0,0 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/slab.h>
18
19#include "ath9k.h"
20
21struct ath9k_vif_iter_data {
22 const u8 *hw_macaddr;
23 u8 mask[ETH_ALEN];
24};
25
26static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
27{
28 struct ath9k_vif_iter_data *iter_data = data;
29 int i;
30
31 for (i = 0; i < ETH_ALEN; i++)
32 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
33}
34
35void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
36{
37 struct ath_wiphy *aphy = hw->priv;
38 struct ath_softc *sc = aphy->sc;
39 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
40 struct ath9k_vif_iter_data iter_data;
41 int i;
42
43 /*
44 * Use the hardware MAC address as reference, the hardware uses it
45 * together with the BSSID mask when matching addresses.
46 */
47 iter_data.hw_macaddr = common->macaddr;
48 memset(&iter_data.mask, 0xff, ETH_ALEN);
49
50 if (vif)
51 ath9k_vif_iter(&iter_data, vif->addr, vif);
52
53 /* Get list of all active MAC addresses */
54 spin_lock_bh(&sc->wiphy_lock);
55 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
56 &iter_data);
57 for (i = 0; i < sc->num_sec_wiphy; i++) {
58 if (sc->sec_wiphy[i] == NULL)
59 continue;
60 ieee80211_iterate_active_interfaces_atomic(
61 sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
62 }
63 spin_unlock_bh(&sc->wiphy_lock);
64
65 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
66 ath_hw_setbssidmask(common);
67}
68
69int ath9k_wiphy_add(struct ath_softc *sc)
70{
71 int i, error;
72 struct ath_wiphy *aphy;
73 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
74 struct ieee80211_hw *hw;
75 u8 addr[ETH_ALEN];
76
77 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
78 if (hw == NULL)
79 return -ENOMEM;
80
81 spin_lock_bh(&sc->wiphy_lock);
82 for (i = 0; i < sc->num_sec_wiphy; i++) {
83 if (sc->sec_wiphy[i] == NULL)
84 break;
85 }
86
87 if (i == sc->num_sec_wiphy) {
88 /* No empty slot available; increase array length */
89 struct ath_wiphy **n;
90 n = krealloc(sc->sec_wiphy,
91 (sc->num_sec_wiphy + 1) *
92 sizeof(struct ath_wiphy *),
93 GFP_ATOMIC);
94 if (n == NULL) {
95 spin_unlock_bh(&sc->wiphy_lock);
96 ieee80211_free_hw(hw);
97 return -ENOMEM;
98 }
99 n[i] = NULL;
100 sc->sec_wiphy = n;
101 sc->num_sec_wiphy++;
102 }
103
104 SET_IEEE80211_DEV(hw, sc->dev);
105
106 aphy = hw->priv;
107 aphy->sc = sc;
108 aphy->hw = hw;
109 sc->sec_wiphy[i] = aphy;
110 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
111 spin_unlock_bh(&sc->wiphy_lock);
112
113 memcpy(addr, common->macaddr, ETH_ALEN);
114 addr[0] |= 0x02; /* Locally managed address */
115 /*
116 * XOR virtual wiphy index into the least significant bits to generate
117 * a different MAC address for each virtual wiphy.
118 */
119 addr[5] ^= i & 0xff;
120 addr[4] ^= (i & 0xff00) >> 8;
121 addr[3] ^= (i & 0xff0000) >> 16;
122
123 SET_IEEE80211_PERM_ADDR(hw, addr);
124
125 ath9k_set_hw_capab(sc, hw);
126
127 error = ieee80211_register_hw(hw);
128
129 if (error == 0) {
130 /* Make sure wiphy scheduler is started (if enabled) */
131 ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
132 }
133
134 return error;
135}
136
137int ath9k_wiphy_del(struct ath_wiphy *aphy)
138{
139 struct ath_softc *sc = aphy->sc;
140 int i;
141
142 spin_lock_bh(&sc->wiphy_lock);
143 for (i = 0; i < sc->num_sec_wiphy; i++) {
144 if (aphy == sc->sec_wiphy[i]) {
145 sc->sec_wiphy[i] = NULL;
146 spin_unlock_bh(&sc->wiphy_lock);
147 ieee80211_unregister_hw(aphy->hw);
148 ieee80211_free_hw(aphy->hw);
149 return 0;
150 }
151 }
152 spin_unlock_bh(&sc->wiphy_lock);
153 return -ENOENT;
154}
155
156static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
157 struct ieee80211_vif *vif, const u8 *bssid,
158 int ps)
159{
160 struct ath_softc *sc = aphy->sc;
161 struct ath_tx_control txctl;
162 struct sk_buff *skb;
163 struct ieee80211_hdr *hdr;
164 __le16 fc;
165 struct ieee80211_tx_info *info;
166
167 skb = dev_alloc_skb(24);
168 if (skb == NULL)
169 return -ENOMEM;
170 hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
171 memset(hdr, 0, 24);
172 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
173 IEEE80211_FCTL_TODS);
174 if (ps)
175 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
176 hdr->frame_control = fc;
177 memcpy(hdr->addr1, bssid, ETH_ALEN);
178 memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
179 memcpy(hdr->addr3, bssid, ETH_ALEN);
180
181 info = IEEE80211_SKB_CB(skb);
182 memset(info, 0, sizeof(*info));
183 info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
184 info->control.vif = vif;
185 info->control.rates[0].idx = 0;
186 info->control.rates[0].count = 4;
187 info->control.rates[1].idx = -1;
188
189 memset(&txctl, 0, sizeof(struct ath_tx_control));
190 txctl.txq = sc->tx.txq_map[WME_AC_VO];
191 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
192
193 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
194 goto exit;
195
196 return 0;
197exit:
198 dev_kfree_skb_any(skb);
199 return -1;
200}
201
202static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
203{
204 int i;
205 if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
206 return true;
207 for (i = 0; i < sc->num_sec_wiphy; i++) {
208 if (sc->sec_wiphy[i] &&
209 sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
210 return true;
211 }
212 return false;
213}
214
215static bool ath9k_wiphy_pausing(struct ath_softc *sc)
216{
217 bool ret;
218 spin_lock_bh(&sc->wiphy_lock);
219 ret = __ath9k_wiphy_pausing(sc);
220 spin_unlock_bh(&sc->wiphy_lock);
221 return ret;
222}
223
224static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
225{
226 int i;
227 if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
228 return true;
229 for (i = 0; i < sc->num_sec_wiphy; i++) {
230 if (sc->sec_wiphy[i] &&
231 sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
232 return true;
233 }
234 return false;
235}
236
237bool ath9k_wiphy_scanning(struct ath_softc *sc)
238{
239 bool ret;
240 spin_lock_bh(&sc->wiphy_lock);
241 ret = __ath9k_wiphy_scanning(sc);
242 spin_unlock_bh(&sc->wiphy_lock);
243 return ret;
244}
245
246static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
247
248/* caller must hold wiphy_lock */
249static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
250{
251 if (aphy == NULL)
252 return;
253 if (aphy->chan_idx != aphy->sc->chan_idx)
254 return; /* wiphy not on the selected channel */
255 __ath9k_wiphy_unpause(aphy);
256}
257
258static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
259{
260 int i;
261 spin_lock_bh(&sc->wiphy_lock);
262 __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
263 for (i = 0; i < sc->num_sec_wiphy; i++)
264 __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
265 spin_unlock_bh(&sc->wiphy_lock);
266}
267
268void ath9k_wiphy_chan_work(struct work_struct *work)
269{
270 struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
271 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
272 struct ath_wiphy *aphy = sc->next_wiphy;
273
274 if (aphy == NULL)
275 return;
276
277 /*
278 * All pending interfaces paused; ready to change
279 * channels.
280 */
281
282 /* Change channels */
283 mutex_lock(&sc->mutex);
284 /* XXX: remove me eventually */
285 ath9k_update_ichannel(sc, aphy->hw,
286 &sc->sc_ah->channels[sc->chan_idx]);
287
288 /* sync hw configuration for hw code */
289 common->hw = aphy->hw;
290
291 if (ath_set_channel(sc, aphy->hw,
292 &sc->sc_ah->channels[sc->chan_idx]) < 0) {
293 printk(KERN_DEBUG "ath9k: Failed to set channel for new "
294 "virtual wiphy\n");
295 mutex_unlock(&sc->mutex);
296 return;
297 }
298 mutex_unlock(&sc->mutex);
299
300 ath9k_wiphy_unpause_channel(sc);
301}
302
303/*
304 * ath9k version of ieee80211_tx_status() for TX frames that are generated
305 * internally in the driver.
306 */
307void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
308{
309 struct ath_wiphy *aphy = hw->priv;
310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
311
312 if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
313 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
314 printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
315 "frame\n", wiphy_name(hw->wiphy));
316 /*
317 * The AP did not reply; ignore this to allow us to
318 * continue.
319 */
320 }
321 aphy->state = ATH_WIPHY_PAUSED;
322 if (!ath9k_wiphy_pausing(aphy->sc)) {
323 /*
324 * Drop from tasklet to work to allow mutex for channel
325 * change.
326 */
327 ieee80211_queue_work(aphy->sc->hw,
328 &aphy->sc->chan_work);
329 }
330 }
331
332 dev_kfree_skb(skb);
333}
334
335static void ath9k_mark_paused(struct ath_wiphy *aphy)
336{
337 struct ath_softc *sc = aphy->sc;
338 aphy->state = ATH_WIPHY_PAUSED;
339 if (!__ath9k_wiphy_pausing(sc))
340 ieee80211_queue_work(sc->hw, &sc->chan_work);
341}
342
343static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
344{
345 struct ath_wiphy *aphy = data;
346 struct ath_vif *avp = (void *) vif->drv_priv;
347
348 switch (vif->type) {
349 case NL80211_IFTYPE_STATION:
350 if (!vif->bss_conf.assoc) {
351 ath9k_mark_paused(aphy);
352 break;
353 }
354 /* TODO: could avoid this if already in PS mode */
355 if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
356 printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
357 __func__);
358 ath9k_mark_paused(aphy);
359 }
360 break;
361 case NL80211_IFTYPE_AP:
362 /* Beacon transmission is paused by aphy->state change */
363 ath9k_mark_paused(aphy);
364 break;
365 default:
366 break;
367 }
368}
369
370/* caller must hold wiphy_lock */
371static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
372{
373 ieee80211_stop_queues(aphy->hw);
374 aphy->state = ATH_WIPHY_PAUSING;
375 /*
376 * TODO: handle PAUSING->PAUSED for the case where there are multiple
377 * active vifs (now we do it on the first vif getting ready; should be
378 * on the last)
379 */
380 ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
381 aphy);
382 return 0;
383}
384
385int ath9k_wiphy_pause(struct ath_wiphy *aphy)
386{
387 int ret;
388 spin_lock_bh(&aphy->sc->wiphy_lock);
389 ret = __ath9k_wiphy_pause(aphy);
390 spin_unlock_bh(&aphy->sc->wiphy_lock);
391 return ret;
392}
393
394static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
395{
396 struct ath_wiphy *aphy = data;
397 struct ath_vif *avp = (void *) vif->drv_priv;
398
399 switch (vif->type) {
400 case NL80211_IFTYPE_STATION:
401 if (!vif->bss_conf.assoc)
402 break;
403 ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
404 break;
405 case NL80211_IFTYPE_AP:
406 /* Beacon transmission is re-enabled by aphy->state change */
407 break;
408 default:
409 break;
410 }
411}
412
413/* caller must hold wiphy_lock */
414static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
415{
416 ieee80211_iterate_active_interfaces_atomic(aphy->hw,
417 ath9k_unpause_iter, aphy);
418 aphy->state = ATH_WIPHY_ACTIVE;
419 ieee80211_wake_queues(aphy->hw);
420 return 0;
421}
422
423int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
424{
425 int ret;
426 spin_lock_bh(&aphy->sc->wiphy_lock);
427 ret = __ath9k_wiphy_unpause(aphy);
428 spin_unlock_bh(&aphy->sc->wiphy_lock);
429 return ret;
430}
431
432static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
433{
434 int i;
435 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
436 sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
437 for (i = 0; i < sc->num_sec_wiphy; i++) {
438 if (sc->sec_wiphy[i] &&
439 sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
440 sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
441 }
442}
443
444/* caller must hold wiphy_lock */
445static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
446{
447 int i;
448 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
449 __ath9k_wiphy_pause(sc->pri_wiphy);
450 for (i = 0; i < sc->num_sec_wiphy; i++) {
451 if (sc->sec_wiphy[i] &&
452 sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
453 __ath9k_wiphy_pause(sc->sec_wiphy[i]);
454 }
455}
456
457int ath9k_wiphy_select(struct ath_wiphy *aphy)
458{
459 struct ath_softc *sc = aphy->sc;
460 bool now;
461
462 spin_lock_bh(&sc->wiphy_lock);
463 if (__ath9k_wiphy_scanning(sc)) {
464 /*
465 * For now, we are using mac80211 sw scan and it expects to
466 * have full control over channel changes, so avoid wiphy
467 * scheduling during a scan. This could be optimized if the
468 * scanning control were moved into the driver.
469 */
470 spin_unlock_bh(&sc->wiphy_lock);
471 return -EBUSY;
472 }
473 if (__ath9k_wiphy_pausing(sc)) {
474 if (sc->wiphy_select_failures == 0)
475 sc->wiphy_select_first_fail = jiffies;
476 sc->wiphy_select_failures++;
477 if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
478 {
479 printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
480 "out; disable/enable hw to recover\n");
481 __ath9k_wiphy_mark_all_paused(sc);
482 /*
483 * TODO: this workaround to fix hardware is unlikely to
484 * be specific to virtual wiphy changes. It can happen
485 * on normal channel change, too, and as such, this
486 * should really be made more generic. For example,
487 * tricker radio disable/enable on GTT interrupt burst
488 * (say, 10 GTT interrupts received without any TX
489 * frame being completed)
490 */
491 spin_unlock_bh(&sc->wiphy_lock);
492 ath_radio_disable(sc, aphy->hw);
493 ath_radio_enable(sc, aphy->hw);
494 /* Only the primary wiphy hw is used for queuing work */
495 ieee80211_queue_work(aphy->sc->hw,
496 &aphy->sc->chan_work);
497 return -EBUSY; /* previous select still in progress */
498 }
499 spin_unlock_bh(&sc->wiphy_lock);
500 return -EBUSY; /* previous select still in progress */
501 }
502 sc->wiphy_select_failures = 0;
503
504 /* Store the new channel */
505 sc->chan_idx = aphy->chan_idx;
506 sc->chan_is_ht = aphy->chan_is_ht;
507 sc->next_wiphy = aphy;
508
509 __ath9k_wiphy_pause_all(sc);
510 now = !__ath9k_wiphy_pausing(aphy->sc);
511 spin_unlock_bh(&sc->wiphy_lock);
512
513 if (now) {
514 /* Ready to request channel change immediately */
515 ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
516 }
517
518 /*
519 * wiphys will be unpaused in ath9k_tx_status() once channel has been
520 * changed if any wiphy needs time to become paused.
521 */
522
523 return 0;
524}
525
526bool ath9k_wiphy_started(struct ath_softc *sc)
527{
528 int i;
529 spin_lock_bh(&sc->wiphy_lock);
530 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
531 spin_unlock_bh(&sc->wiphy_lock);
532 return true;
533 }
534 for (i = 0; i < sc->num_sec_wiphy; i++) {
535 if (sc->sec_wiphy[i] &&
536 sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
537 spin_unlock_bh(&sc->wiphy_lock);
538 return true;
539 }
540 }
541 spin_unlock_bh(&sc->wiphy_lock);
542 return false;
543}
544
545static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
546 struct ath_wiphy *selected)
547{
548 if (selected->state == ATH_WIPHY_SCAN) {
549 if (aphy == selected)
550 return;
551 /*
552 * Pause all other wiphys for the duration of the scan even if
553 * they are on the current channel now.
554 */
555 } else if (aphy->chan_idx == selected->chan_idx)
556 return;
557 aphy->state = ATH_WIPHY_PAUSED;
558 ieee80211_stop_queues(aphy->hw);
559}
560
561void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
562 struct ath_wiphy *selected)
563{
564 int i;
565 spin_lock_bh(&sc->wiphy_lock);
566 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
567 ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
568 for (i = 0; i < sc->num_sec_wiphy; i++) {
569 if (sc->sec_wiphy[i] &&
570 sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
571 ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
572 }
573 spin_unlock_bh(&sc->wiphy_lock);
574}
575
576void ath9k_wiphy_work(struct work_struct *work)
577{
578 struct ath_softc *sc = container_of(work, struct ath_softc,
579 wiphy_work.work);
580 struct ath_wiphy *aphy = NULL;
581 bool first = true;
582
583 spin_lock_bh(&sc->wiphy_lock);
584
585 if (sc->wiphy_scheduler_int == 0) {
586 /* wiphy scheduler is disabled */
587 spin_unlock_bh(&sc->wiphy_lock);
588 return;
589 }
590
591try_again:
592 sc->wiphy_scheduler_index++;
593 while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
594 aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
595 if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
596 break;
597
598 sc->wiphy_scheduler_index++;
599 aphy = NULL;
600 }
601 if (aphy == NULL) {
602 sc->wiphy_scheduler_index = 0;
603 if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
604 if (first) {
605 first = false;
606 goto try_again;
607 }
608 /* No wiphy is ready to be scheduled */
609 } else
610 aphy = sc->pri_wiphy;
611 }
612
613 spin_unlock_bh(&sc->wiphy_lock);
614
615 if (aphy &&
616 aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
617 ath9k_wiphy_select(aphy)) {
618 printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
619 "change\n");
620 }
621
622 ieee80211_queue_delayed_work(sc->hw,
623 &sc->wiphy_work,
624 sc->wiphy_scheduler_int);
625}
626
627void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
628{
629 cancel_delayed_work_sync(&sc->wiphy_work);
630 sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
631 if (sc->wiphy_scheduler_int)
632 ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
633 sc->wiphy_scheduler_int);
634}
635
636/* caller must hold wiphy_lock */
637bool ath9k_all_wiphys_idle(struct ath_softc *sc)
638{
639 unsigned int i;
640 if (!sc->pri_wiphy->idle)
641 return false;
642 for (i = 0; i < sc->num_sec_wiphy; i++) {
643 struct ath_wiphy *aphy = sc->sec_wiphy[i];
644 if (!aphy)
645 continue;
646 if (!aphy->idle)
647 return false;
648 }
649 return true;
650}
651
652/* caller must hold wiphy_lock */
653void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
654{
655 struct ath_softc *sc = aphy->sc;
656
657 aphy->idle = idle;
658 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
659 "Marking %s as %sidle\n",
660 wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
661}
662/* Only bother starting a queue on an active virtual wiphy */
663bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
664{
665 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
666 unsigned int i;
667 bool txq_started = false;
668
669 spin_lock_bh(&sc->wiphy_lock);
670
671 /* Start the primary wiphy */
672 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
673 ieee80211_wake_queue(hw, skb_queue);
674 txq_started = true;
675 goto unlock;
676 }
677
678 /* Now start the secondary wiphy queues */
679 for (i = 0; i < sc->num_sec_wiphy; i++) {
680 struct ath_wiphy *aphy = sc->sec_wiphy[i];
681 if (!aphy)
682 continue;
683 if (aphy->state != ATH_WIPHY_ACTIVE)
684 continue;
685
686 hw = aphy->hw;
687 ieee80211_wake_queue(hw, skb_queue);
688 txq_started = true;
689 break;
690 }
691
692unlock:
693 spin_unlock_bh(&sc->wiphy_lock);
694 return txq_started;
695}
696
697/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
698void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
699{
700 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
701 unsigned int i;
702
703 spin_lock_bh(&sc->wiphy_lock);
704
705 /* Stop the primary wiphy */
706 ieee80211_stop_queue(hw, skb_queue);
707
708 /* Now stop the secondary wiphy queues */
709 for (i = 0; i < sc->num_sec_wiphy; i++) {
710 struct ath_wiphy *aphy = sc->sec_wiphy[i];
711 if (!aphy)
712 continue;
713 hw = aphy->hw;
714 ieee80211_stop_queue(hw, skb_queue);
715 }
716 spin_unlock_bh(&sc->wiphy_lock);
717}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index dc862f5e1162..d3d24904f62f 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -123,12 +123,8 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
123void ath9k_swba_tasklet(unsigned long data) 123void ath9k_swba_tasklet(unsigned long data)
124{ 124{
125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
126 struct ath_common *common = ath9k_hw_common(priv->ah);
127
128 ath_dbg(common, ATH_DBG_WMI, "SWBA Event received\n");
129 126
130 ath9k_htc_swba(priv, priv->wmi->beacon_pending); 127 ath9k_htc_swba(priv, priv->wmi->beacon_pending);
131
132} 128}
133 129
134void ath9k_fatal_work(struct work_struct *work) 130void ath9k_fatal_work(struct work_struct *work)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 07b7804aec5b..ef22096d40c9 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -19,7 +19,6 @@
19 19
20#define BITS_PER_BYTE 8 20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22 21#define OFDM_PLCP_BITS 22
22#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
23#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) 22#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
24#define L_STF 8 23#define L_STF 8
25#define L_LTF 8 24#define L_LTF 8
@@ -32,7 +31,6 @@
32#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) 31#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) 32#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
34 33
35#define OFDM_SIFS_TIME 16
36 34
37static u16 bits_per_symbol[][2] = { 35static u16 bits_per_symbol[][2] = {
38 /* 20MHz 40MHz */ 36 /* 20MHz 40MHz */
@@ -57,8 +55,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 55static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58 struct list_head *head); 56 struct list_head *head);
59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len); 57static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
60static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 58static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
61 int nframes, int nbad, int txok, bool update_rc); 59 struct ath_tx_status *ts, int nframes, int nbad,
60 int txok, bool update_rc);
62static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 61static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
63 int seqno); 62 int seqno);
64 63
@@ -167,9 +166,9 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
167 fi = get_frame_info(bf->bf_mpdu); 166 fi = get_frame_info(bf->bf_mpdu);
168 if (fi->retries) { 167 if (fi->retries) {
169 ath_tx_update_baw(sc, tid, fi->seqno); 168 ath_tx_update_baw(sc, tid, fi->seqno);
170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 169 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
171 } else { 170 } else {
172 ath_tx_send_normal(sc, txq, tid, &bf_head); 171 ath_tx_send_normal(sc, txq, NULL, &bf_head);
173 } 172 }
174 spin_lock_bh(&txq->axq_lock); 173 spin_lock_bh(&txq->axq_lock);
175 } 174 }
@@ -297,7 +296,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
297 296
298 ATH_TXBUF_RESET(tbf); 297 ATH_TXBUF_RESET(tbf);
299 298
300 tbf->aphy = bf->aphy;
301 tbf->bf_mpdu = bf->bf_mpdu; 299 tbf->bf_mpdu = bf->bf_mpdu;
302 tbf->bf_buf_addr = bf->bf_buf_addr; 300 tbf->bf_buf_addr = bf->bf_buf_addr;
303 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 301 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
@@ -345,7 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
345 struct ath_node *an = NULL; 343 struct ath_node *an = NULL;
346 struct sk_buff *skb; 344 struct sk_buff *skb;
347 struct ieee80211_sta *sta; 345 struct ieee80211_sta *sta;
348 struct ieee80211_hw *hw; 346 struct ieee80211_hw *hw = sc->hw;
349 struct ieee80211_hdr *hdr; 347 struct ieee80211_hdr *hdr;
350 struct ieee80211_tx_info *tx_info; 348 struct ieee80211_tx_info *tx_info;
351 struct ath_atx_tid *tid = NULL; 349 struct ath_atx_tid *tid = NULL;
@@ -364,7 +362,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
364 hdr = (struct ieee80211_hdr *)skb->data; 362 hdr = (struct ieee80211_hdr *)skb->data;
365 363
366 tx_info = IEEE80211_SKB_CB(skb); 364 tx_info = IEEE80211_SKB_CB(skb);
367 hw = bf->aphy->hw;
368 365
369 memcpy(rates, tx_info->control.rates, sizeof(rates)); 366 memcpy(rates, tx_info->control.rates, sizeof(rates));
370 367
@@ -383,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
383 !bf->bf_stale || bf_next != NULL) 380 !bf->bf_stale || bf_next != NULL)
384 list_move_tail(&bf->list, &bf_head); 381 list_move_tail(&bf->list, &bf_head);
385 382
386 ath_tx_rc_status(bf, ts, 1, 1, 0, false); 383 ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
387 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 384 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
388 0, 0); 385 0, 0);
389 386
@@ -429,7 +426,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
429 426
430 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); 427 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
431 while (bf) { 428 while (bf) {
432 txfail = txpending = 0; 429 txfail = txpending = sendbar = 0;
433 bf_next = bf->bf_next; 430 bf_next = bf->bf_next;
434 431
435 skb = bf->bf_mpdu; 432 skb = bf->bf_mpdu;
@@ -489,10 +486,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
489 486
490 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 487 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
491 memcpy(tx_info->control.rates, rates, sizeof(rates)); 488 memcpy(tx_info->control.rates, rates, sizeof(rates));
492 ath_tx_rc_status(bf, ts, nframes, nbad, txok, true); 489 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
493 rc_update = false; 490 rc_update = false;
494 } else { 491 } else {
495 ath_tx_rc_status(bf, ts, nframes, nbad, txok, false); 492 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
496 } 493 }
497 494
498 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 495 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -516,7 +513,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
516 513
517 bf->bf_state.bf_type |= 514 bf->bf_state.bf_type |=
518 BUF_XRETRY; 515 BUF_XRETRY;
519 ath_tx_rc_status(bf, ts, nframes, 516 ath_tx_rc_status(sc, bf, ts, nframes,
520 nbad, 0, false); 517 nbad, 0, false);
521 ath_tx_complete_buf(sc, bf, txq, 518 ath_tx_complete_buf(sc, bf, txq,
522 &bf_head, 519 &bf_head,
@@ -566,8 +563,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
566 563
567 rcu_read_unlock(); 564 rcu_read_unlock();
568 565
569 if (needreset) 566 if (needreset) {
567 spin_unlock_bh(&sc->sc_pcu_lock);
570 ath_reset(sc, false); 568 ath_reset(sc, false);
569 spin_lock_bh(&sc->sc_pcu_lock);
570 }
571} 571}
572 572
573static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, 573static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -856,7 +856,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
856 856
857 txtid->state |= AGGR_ADDBA_PROGRESS; 857 txtid->state |= AGGR_ADDBA_PROGRESS;
858 txtid->paused = true; 858 txtid->paused = true;
859 *ssn = txtid->seq_start; 859 *ssn = txtid->seq_start = txtid->seq_next;
860
861 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
862 txtid->baw_head = txtid->baw_tail = 0;
860 863
861 return 0; 864 return 0;
862} 865}
@@ -942,7 +945,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
942 [WME_AC_VI] = ATH_TXQ_AC_VI, 945 [WME_AC_VI] = ATH_TXQ_AC_VI,
943 [WME_AC_VO] = ATH_TXQ_AC_VO, 946 [WME_AC_VO] = ATH_TXQ_AC_VO,
944 }; 947 };
945 int qnum, i; 948 int axq_qnum, i;
946 949
947 memset(&qi, 0, sizeof(qi)); 950 memset(&qi, 0, sizeof(qi));
948 qi.tqi_subtype = subtype_txq_to_hwq[subtype]; 951 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
@@ -976,24 +979,25 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
976 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 979 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
977 TXQ_FLAG_TXDESCINT_ENABLE; 980 TXQ_FLAG_TXDESCINT_ENABLE;
978 } 981 }
979 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 982 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
980 if (qnum == -1) { 983 if (axq_qnum == -1) {
981 /* 984 /*
982 * NB: don't print a message, this happens 985 * NB: don't print a message, this happens
983 * normally on parts with too few tx queues 986 * normally on parts with too few tx queues
984 */ 987 */
985 return NULL; 988 return NULL;
986 } 989 }
987 if (qnum >= ARRAY_SIZE(sc->tx.txq)) { 990 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
988 ath_err(common, "qnum %u out of range, max %zu!\n", 991 ath_err(common, "qnum %u out of range, max %zu!\n",
989 qnum, ARRAY_SIZE(sc->tx.txq)); 992 axq_qnum, ARRAY_SIZE(sc->tx.txq));
990 ath9k_hw_releasetxqueue(ah, qnum); 993 ath9k_hw_releasetxqueue(ah, axq_qnum);
991 return NULL; 994 return NULL;
992 } 995 }
993 if (!ATH_TXQ_SETUP(sc, qnum)) { 996 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
994 struct ath_txq *txq = &sc->tx.txq[qnum]; 997 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
995 998
996 txq->axq_qnum = qnum; 999 txq->axq_qnum = axq_qnum;
1000 txq->mac80211_qnum = -1;
997 txq->axq_link = NULL; 1001 txq->axq_link = NULL;
998 INIT_LIST_HEAD(&txq->axq_q); 1002 INIT_LIST_HEAD(&txq->axq_q);
999 INIT_LIST_HEAD(&txq->axq_acq); 1003 INIT_LIST_HEAD(&txq->axq_acq);
@@ -1001,14 +1005,14 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1001 txq->axq_depth = 0; 1005 txq->axq_depth = 0;
1002 txq->axq_ampdu_depth = 0; 1006 txq->axq_ampdu_depth = 0;
1003 txq->axq_tx_inprogress = false; 1007 txq->axq_tx_inprogress = false;
1004 sc->tx.txqsetup |= 1<<qnum; 1008 sc->tx.txqsetup |= 1<<axq_qnum;
1005 1009
1006 txq->txq_headidx = txq->txq_tailidx = 0; 1010 txq->txq_headidx = txq->txq_tailidx = 0;
1007 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) 1011 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1008 INIT_LIST_HEAD(&txq->txq_fifo[i]); 1012 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1009 INIT_LIST_HEAD(&txq->txq_fifo_pending); 1013 INIT_LIST_HEAD(&txq->txq_fifo_pending);
1010 } 1014 }
1011 return &sc->tx.txq[qnum]; 1015 return &sc->tx.txq[axq_qnum];
1012} 1016}
1013 1017
1014int ath_txq_update(struct ath_softc *sc, int qnum, 1018int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -1051,6 +1055,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
1051int ath_cabq_update(struct ath_softc *sc) 1055int ath_cabq_update(struct ath_softc *sc)
1052{ 1056{
1053 struct ath9k_tx_queue_info qi; 1057 struct ath9k_tx_queue_info qi;
1058 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
1054 int qnum = sc->beacon.cabq->axq_qnum; 1059 int qnum = sc->beacon.cabq->axq_qnum;
1055 1060
1056 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1061 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1062,7 +1067,7 @@ int ath_cabq_update(struct ath_softc *sc)
1062 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) 1067 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1063 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; 1068 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1064 1069
1065 qi.tqi_readyTime = (sc->beacon_interval * 1070 qi.tqi_readyTime = (cur_conf->beacon_interval *
1066 sc->config.cabqReadytime) / 100; 1071 sc->config.cabqReadytime) / 100;
1067 ath_txq_update(sc, qnum, &qi); 1072 ath_txq_update(sc, qnum, &qi);
1068 1073
@@ -1189,24 +1194,31 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1189 if (sc->sc_flags & SC_OP_INVALID) 1194 if (sc->sc_flags & SC_OP_INVALID)
1190 return true; 1195 return true;
1191 1196
1192 /* Stop beacon queue */ 1197 ath9k_hw_abort_tx_dma(ah);
1193 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1194 1198
1195 /* Stop data queues */ 1199 /* Check if any queue remains active */
1196 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1200 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1197 if (ATH_TXQ_SETUP(sc, i)) { 1201 if (!ATH_TXQ_SETUP(sc, i))
1198 txq = &sc->tx.txq[i]; 1202 continue;
1199 ath9k_hw_stoptxdma(ah, txq->axq_qnum); 1203
1200 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum); 1204 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
1201 }
1202 } 1205 }
1203 1206
1204 if (npend) 1207 if (npend)
1205 ath_err(common, "Failed to stop TX DMA!\n"); 1208 ath_err(common, "Failed to stop TX DMA!\n");
1206 1209
1207 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1210 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1208 if (ATH_TXQ_SETUP(sc, i)) 1211 if (!ATH_TXQ_SETUP(sc, i))
1209 ath_draintxq(sc, &sc->tx.txq[i], retry_tx); 1212 continue;
1213
1214 /*
1215 * The caller will resume queues with ieee80211_wake_queues.
1216 * Mark the queue as not stopped to prevent ath_tx_complete
1217 * from waking the queue too early.
1218 */
1219 txq = &sc->tx.txq[i];
1220 txq->stopped = false;
1221 ath_draintxq(sc, txq, retry_tx);
1210 } 1222 }
1211 1223
1212 return !npend; 1224 return !npend;
@@ -1218,46 +1230,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1218 sc->tx.txqsetup &= ~(1<<txq->axq_qnum); 1230 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1219} 1231}
1220 1232
1233/* For each axq_acq entry, for each tid, try to schedule packets
1234 * for transmit until ampdu_depth has reached min Q depth.
1235 */
1221void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1236void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1222{ 1237{
1223 struct ath_atx_ac *ac; 1238 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1224 struct ath_atx_tid *tid; 1239 struct ath_atx_tid *tid, *last_tid;
1225 1240
1226 if (list_empty(&txq->axq_acq)) 1241 if (list_empty(&txq->axq_acq) ||
1242 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1227 return; 1243 return;
1228 1244
1229 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1245 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1230 list_del(&ac->list); 1246 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1231 ac->sched = false;
1232 1247
1233 do { 1248 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1234 if (list_empty(&ac->tid_q)) 1249 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1235 return; 1250 list_del(&ac->list);
1251 ac->sched = false;
1236 1252
1237 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); 1253 while (!list_empty(&ac->tid_q)) {
1238 list_del(&tid->list); 1254 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1239 tid->sched = false; 1255 list);
1256 list_del(&tid->list);
1257 tid->sched = false;
1240 1258
1241 if (tid->paused) 1259 if (tid->paused)
1242 continue; 1260 continue;
1243 1261
1244 ath_tx_sched_aggr(sc, txq, tid); 1262 ath_tx_sched_aggr(sc, txq, tid);
1245 1263
1246 /* 1264 /*
1247 * add tid to round-robin queue if more frames 1265 * add tid to round-robin queue if more frames
1248 * are pending for the tid 1266 * are pending for the tid
1249 */ 1267 */
1250 if (!list_empty(&tid->buf_q)) 1268 if (!list_empty(&tid->buf_q))
1251 ath_tx_queue_tid(txq, tid); 1269 ath_tx_queue_tid(txq, tid);
1252 1270
1253 break; 1271 if (tid == last_tid ||
1254 } while (!list_empty(&ac->tid_q)); 1272 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1273 break;
1274 }
1255 1275
1256 if (!list_empty(&ac->tid_q)) { 1276 if (!list_empty(&ac->tid_q)) {
1257 if (!ac->sched) { 1277 if (!ac->sched) {
1258 ac->sched = true; 1278 ac->sched = true;
1259 list_add_tail(&ac->list, &txq->axq_acq); 1279 list_add_tail(&ac->list, &txq->axq_acq);
1280 }
1260 } 1281 }
1282
1283 if (ac == last_ac ||
1284 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1285 return;
1261 } 1286 }
1262} 1287}
1263 1288
@@ -1301,6 +1326,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1301 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]); 1326 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1302 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]); 1327 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1303 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); 1328 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1329 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1304 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1330 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1305 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", 1331 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1306 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1332 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
@@ -1308,6 +1334,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1308 list_splice_tail_init(head, &txq->axq_q); 1334 list_splice_tail_init(head, &txq->axq_q);
1309 1335
1310 if (txq->axq_link == NULL) { 1336 if (txq->axq_link == NULL) {
1337 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1311 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1338 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1312 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", 1339 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1313 txq->axq_qnum, ito64(bf->bf_daddr), 1340 txq->axq_qnum, ito64(bf->bf_daddr),
@@ -1321,6 +1348,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1321 } 1348 }
1322 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, 1349 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1323 &txq->axq_link); 1350 &txq->axq_link);
1351 TX_STAT_INC(txq->axq_qnum, txstart);
1324 ath9k_hw_txstart(ah, txq->axq_qnum); 1352 ath9k_hw_txstart(ah, txq->axq_qnum);
1325 } 1353 }
1326 txq->axq_depth++; 1354 txq->axq_depth++;
@@ -1335,7 +1363,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1335 struct list_head bf_head; 1363 struct list_head bf_head;
1336 1364
1337 bf->bf_state.bf_type |= BUF_AMPDU; 1365 bf->bf_state.bf_type |= BUF_AMPDU;
1338 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
1339 1366
1340 /* 1367 /*
1341 * Do not queue to h/w when any of the following conditions is true: 1368 * Do not queue to h/w when any of the following conditions is true:
@@ -1351,6 +1378,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1351 * Add this frame to software queue for scheduling later 1378 * Add this frame to software queue for scheduling later
1352 * for aggregation. 1379 * for aggregation.
1353 */ 1380 */
1381 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
1354 list_add_tail(&bf->list, &tid->buf_q); 1382 list_add_tail(&bf->list, &tid->buf_q);
1355 ath_tx_queue_tid(txctl->txq, tid); 1383 ath_tx_queue_tid(txctl->txq, tid);
1356 return; 1384 return;
@@ -1364,6 +1392,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1364 ath_tx_addto_baw(sc, tid, fi->seqno); 1392 ath_tx_addto_baw(sc, tid, fi->seqno);
1365 1393
1366 /* Queue to h/w without aggregation */ 1394 /* Queue to h/w without aggregation */
1395 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
1367 bf->bf_lastbf = bf; 1396 bf->bf_lastbf = bf;
1368 ath_buf_set_rate(sc, bf, fi->framelen); 1397 ath_buf_set_rate(sc, bf, fi->framelen);
1369 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head); 1398 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
@@ -1416,8 +1445,7 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1416static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, 1445static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1417 int framelen) 1446 int framelen)
1418{ 1447{
1419 struct ath_wiphy *aphy = hw->priv; 1448 struct ath_softc *sc = hw->priv;
1420 struct ath_softc *sc = aphy->sc;
1421 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1449 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1422 struct ieee80211_sta *sta = tx_info->control.sta; 1450 struct ieee80211_sta *sta = tx_info->control.sta;
1423 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1451 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
@@ -1635,8 +1663,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1635 struct ath_txq *txq, 1663 struct ath_txq *txq,
1636 struct sk_buff *skb) 1664 struct sk_buff *skb)
1637{ 1665{
1638 struct ath_wiphy *aphy = hw->priv; 1666 struct ath_softc *sc = hw->priv;
1639 struct ath_softc *sc = aphy->sc;
1640 struct ath_hw *ah = sc->sc_ah; 1667 struct ath_hw *ah = sc->sc_ah;
1641 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1668 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1642 struct ath_frame_info *fi = get_frame_info(skb); 1669 struct ath_frame_info *fi = get_frame_info(skb);
@@ -1652,7 +1679,6 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1652 1679
1653 ATH_TXBUF_RESET(bf); 1680 ATH_TXBUF_RESET(bf);
1654 1681
1655 bf->aphy = aphy;
1656 bf->bf_flags = setup_tx_flags(skb); 1682 bf->bf_flags = setup_tx_flags(skb);
1657 bf->bf_mpdu = skb; 1683 bf->bf_mpdu = skb;
1658 1684
@@ -1741,8 +1767,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1741 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1742 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1768 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1743 struct ieee80211_sta *sta = info->control.sta; 1769 struct ieee80211_sta *sta = info->control.sta;
1744 struct ath_wiphy *aphy = hw->priv; 1770 struct ath_softc *sc = hw->priv;
1745 struct ath_softc *sc = aphy->sc;
1746 struct ath_txq *txq = txctl->txq; 1771 struct ath_txq *txq = txctl->txq;
1747 struct ath_buf *bf; 1772 struct ath_buf *bf;
1748 int padpos, padsize; 1773 int padpos, padsize;
@@ -1794,7 +1819,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1794 spin_lock_bh(&txq->axq_lock); 1819 spin_lock_bh(&txq->axq_lock);
1795 if (txq == sc->tx.txq_map[q] && 1820 if (txq == sc->tx.txq_map[q] &&
1796 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) { 1821 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1797 ath_mac80211_stop_queue(sc, q); 1822 ieee80211_stop_queue(sc->hw, q);
1798 txq->stopped = 1; 1823 txq->stopped = 1;
1799 } 1824 }
1800 spin_unlock_bh(&txq->axq_lock); 1825 spin_unlock_bh(&txq->axq_lock);
@@ -1809,8 +1834,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1809/*****************/ 1834/*****************/
1810 1835
1811static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1836static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1812 struct ath_wiphy *aphy, int tx_flags, int ftype, 1837 int tx_flags, int ftype, struct ath_txq *txq)
1813 struct ath_txq *txq)
1814{ 1838{
1815 struct ieee80211_hw *hw = sc->hw; 1839 struct ieee80211_hw *hw = sc->hw;
1816 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1840 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1820,9 +1844,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1820 1844
1821 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1845 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1822 1846
1823 if (aphy)
1824 hw = aphy->hw;
1825
1826 if (tx_flags & ATH_TX_BAR) 1847 if (tx_flags & ATH_TX_BAR)
1827 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1848 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1828 1849
@@ -1852,19 +1873,20 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1852 PS_WAIT_FOR_TX_ACK)); 1873 PS_WAIT_FOR_TX_ACK));
1853 } 1874 }
1854 1875
1855 if (unlikely(ftype)) 1876 q = skb_get_queue_mapping(skb);
1856 ath9k_tx_status(hw, skb, ftype); 1877 if (txq == sc->tx.txq_map[q]) {
1857 else { 1878 spin_lock_bh(&txq->axq_lock);
1858 q = skb_get_queue_mapping(skb); 1879 if (WARN_ON(--txq->pending_frames < 0))
1859 if (txq == sc->tx.txq_map[q]) { 1880 txq->pending_frames = 0;
1860 spin_lock_bh(&txq->axq_lock);
1861 if (WARN_ON(--txq->pending_frames < 0))
1862 txq->pending_frames = 0;
1863 spin_unlock_bh(&txq->axq_lock);
1864 }
1865 1881
1866 ieee80211_tx_status(hw, skb); 1882 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1883 ieee80211_wake_queue(sc->hw, q);
1884 txq->stopped = 0;
1885 }
1886 spin_unlock_bh(&txq->axq_lock);
1867 } 1887 }
1888
1889 ieee80211_tx_status(hw, skb);
1868} 1890}
1869 1891
1870static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1892static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1896,8 +1918,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1896 else 1918 else
1897 complete(&sc->paprd_complete); 1919 complete(&sc->paprd_complete);
1898 } else { 1920 } else {
1899 ath_debug_stat_tx(sc, bf, ts); 1921 ath_debug_stat_tx(sc, bf, ts, txq);
1900 ath_tx_complete(sc, skb, bf->aphy, tx_flags, 1922 ath_tx_complete(sc, skb, tx_flags,
1901 bf->bf_state.bfs_ftype, txq); 1923 bf->bf_state.bfs_ftype, txq);
1902 } 1924 }
1903 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't 1925 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -1913,14 +1935,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1913 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 1935 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1914} 1936}
1915 1937
1916static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 1938static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1917 int nframes, int nbad, int txok, bool update_rc) 1939 struct ath_tx_status *ts, int nframes, int nbad,
1940 int txok, bool update_rc)
1918{ 1941{
1919 struct sk_buff *skb = bf->bf_mpdu; 1942 struct sk_buff *skb = bf->bf_mpdu;
1920 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1943 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1921 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1944 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1922 struct ieee80211_hw *hw = bf->aphy->hw; 1945 struct ieee80211_hw *hw = sc->hw;
1923 struct ath_softc *sc = bf->aphy->sc;
1924 struct ath_hw *ah = sc->sc_ah; 1946 struct ath_hw *ah = sc->sc_ah;
1925 u8 i, tx_rateindex; 1947 u8 i, tx_rateindex;
1926 1948
@@ -1971,19 +1993,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1971 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 1993 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
1972} 1994}
1973 1995
1974static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
1975{
1976 struct ath_txq *txq;
1977
1978 txq = sc->tx.txq_map[qnum];
1979 spin_lock_bh(&txq->axq_lock);
1980 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1981 if (ath_mac80211_start_queue(sc, qnum))
1982 txq->stopped = 0;
1983 }
1984 spin_unlock_bh(&txq->axq_lock);
1985}
1986
1987static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 1996static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1988{ 1997{
1989 struct ath_hw *ah = sc->sc_ah; 1998 struct ath_hw *ah = sc->sc_ah;
@@ -1994,7 +2003,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1994 struct ath_tx_status ts; 2003 struct ath_tx_status ts;
1995 int txok; 2004 int txok;
1996 int status; 2005 int status;
1997 int qnum;
1998 2006
1999 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 2007 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2000 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 2008 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2004,6 +2012,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2004 spin_lock_bh(&txq->axq_lock); 2012 spin_lock_bh(&txq->axq_lock);
2005 if (list_empty(&txq->axq_q)) { 2013 if (list_empty(&txq->axq_q)) {
2006 txq->axq_link = NULL; 2014 txq->axq_link = NULL;
2015 if (sc->sc_flags & SC_OP_TXAGGR)
2016 ath_txq_schedule(sc, txq);
2007 spin_unlock_bh(&txq->axq_lock); 2017 spin_unlock_bh(&txq->axq_lock);
2008 break; 2018 break;
2009 } 2019 }
@@ -2038,6 +2048,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2038 spin_unlock_bh(&txq->axq_lock); 2048 spin_unlock_bh(&txq->axq_lock);
2039 break; 2049 break;
2040 } 2050 }
2051 TX_STAT_INC(txq->axq_qnum, txprocdesc);
2041 2052
2042 /* 2053 /*
2043 * Remove ath_buf's of the same transmit unit from txq, 2054 * Remove ath_buf's of the same transmit unit from txq,
@@ -2058,6 +2069,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2058 2069
2059 if (bf_is_ampdu_not_probing(bf)) 2070 if (bf_is_ampdu_not_probing(bf))
2060 txq->axq_ampdu_depth--; 2071 txq->axq_ampdu_depth--;
2072
2061 spin_unlock_bh(&txq->axq_lock); 2073 spin_unlock_bh(&txq->axq_lock);
2062 2074
2063 if (bf_held) 2075 if (bf_held)
@@ -2070,27 +2082,45 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2070 */ 2082 */
2071 if (ts.ts_status & ATH9K_TXERR_XRETRY) 2083 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2072 bf->bf_state.bf_type |= BUF_XRETRY; 2084 bf->bf_state.bf_type |= BUF_XRETRY;
2073 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true); 2085 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
2074 } 2086 }
2075 2087
2076 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2077
2078 if (bf_isampdu(bf)) 2088 if (bf_isampdu(bf))
2079 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok, 2089 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2080 true); 2090 true);
2081 else 2091 else
2082 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0); 2092 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2083 2093
2084 if (txq == sc->tx.txq_map[qnum])
2085 ath_wake_mac80211_queue(sc, qnum);
2086
2087 spin_lock_bh(&txq->axq_lock); 2094 spin_lock_bh(&txq->axq_lock);
2095
2088 if (sc->sc_flags & SC_OP_TXAGGR) 2096 if (sc->sc_flags & SC_OP_TXAGGR)
2089 ath_txq_schedule(sc, txq); 2097 ath_txq_schedule(sc, txq);
2090 spin_unlock_bh(&txq->axq_lock); 2098 spin_unlock_bh(&txq->axq_lock);
2091 } 2099 }
2092} 2100}
2093 2101
2102static void ath_hw_pll_work(struct work_struct *work)
2103{
2104 struct ath_softc *sc = container_of(work, struct ath_softc,
2105 hw_pll_work.work);
2106 static int count;
2107
2108 if (AR_SREV_9485(sc->sc_ah)) {
2109 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2110 count++;
2111
2112 if (count == 3) {
2113 /* Rx is hung for more than 500ms. Reset it */
2114 ath_reset(sc, true);
2115 count = 0;
2116 }
2117 } else
2118 count = 0;
2119
2120 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2121 }
2122}
2123
2094static void ath_tx_complete_poll_work(struct work_struct *work) 2124static void ath_tx_complete_poll_work(struct work_struct *work)
2095{ 2125{
2096 struct ath_softc *sc = container_of(work, struct ath_softc, 2126 struct ath_softc *sc = container_of(work, struct ath_softc,
@@ -2098,6 +2128,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2098 struct ath_txq *txq; 2128 struct ath_txq *txq;
2099 int i; 2129 int i;
2100 bool needreset = false; 2130 bool needreset = false;
2131#ifdef CONFIG_ATH9K_DEBUGFS
2132 sc->tx_complete_poll_work_seen++;
2133#endif
2101 2134
2102 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 2135 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2103 if (ATH_TXQ_SETUP(sc, i)) { 2136 if (ATH_TXQ_SETUP(sc, i)) {
@@ -2111,6 +2144,33 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2111 } else { 2144 } else {
2112 txq->axq_tx_inprogress = true; 2145 txq->axq_tx_inprogress = true;
2113 } 2146 }
2147 } else {
2148 /* If the queue has pending buffers, then it
2149 * should be doing tx work (and have axq_depth).
2150 * Shouldn't get to this state I think..but
2151 * we do.
2152 */
2153 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2154 (txq->pending_frames > 0 ||
2155 !list_empty(&txq->axq_acq) ||
2156 txq->stopped)) {
2157 ath_err(ath9k_hw_common(sc->sc_ah),
2158 "txq: %p axq_qnum: %u,"
2159 " mac80211_qnum: %i"
2160 " axq_link: %p"
2161 " pending frames: %i"
2162 " axq_acq empty: %i"
2163 " stopped: %i"
2164 " axq_depth: 0 Attempting to"
2165 " restart tx logic.\n",
2166 txq, txq->axq_qnum,
2167 txq->mac80211_qnum,
2168 txq->axq_link,
2169 txq->pending_frames,
2170 list_empty(&txq->axq_acq),
2171 txq->stopped);
2172 ath_txq_schedule(sc, txq);
2173 }
2114 } 2174 }
2115 spin_unlock_bh(&txq->axq_lock); 2175 spin_unlock_bh(&txq->axq_lock);
2116 } 2176 }
@@ -2150,7 +2210,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2150 struct list_head bf_head; 2210 struct list_head bf_head;
2151 int status; 2211 int status;
2152 int txok; 2212 int txok;
2153 int qnum;
2154 2213
2155 for (;;) { 2214 for (;;) {
2156 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs); 2215 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2193,11 +2252,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2193 if (!bf_isampdu(bf)) { 2252 if (!bf_isampdu(bf)) {
2194 if (txs.ts_status & ATH9K_TXERR_XRETRY) 2253 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2195 bf->bf_state.bf_type |= BUF_XRETRY; 2254 bf->bf_state.bf_type |= BUF_XRETRY;
2196 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true); 2255 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
2197 } 2256 }
2198 2257
2199 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2200
2201 if (bf_isampdu(bf)) 2258 if (bf_isampdu(bf))
2202 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, 2259 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2203 txok, true); 2260 txok, true);
@@ -2205,19 +2262,19 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2205 ath_tx_complete_buf(sc, bf, txq, &bf_head, 2262 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2206 &txs, txok, 0); 2263 &txs, txok, 0);
2207 2264
2208 if (txq == sc->tx.txq_map[qnum])
2209 ath_wake_mac80211_queue(sc, qnum);
2210
2211 spin_lock_bh(&txq->axq_lock); 2265 spin_lock_bh(&txq->axq_lock);
2266
2212 if (!list_empty(&txq->txq_fifo_pending)) { 2267 if (!list_empty(&txq->txq_fifo_pending)) {
2213 INIT_LIST_HEAD(&bf_head); 2268 INIT_LIST_HEAD(&bf_head);
2214 bf = list_first_entry(&txq->txq_fifo_pending, 2269 bf = list_first_entry(&txq->txq_fifo_pending,
2215 struct ath_buf, list); 2270 struct ath_buf, list);
2216 list_cut_position(&bf_head, &txq->txq_fifo_pending, 2271 list_cut_position(&bf_head,
2217 &bf->bf_lastbf->list); 2272 &txq->txq_fifo_pending,
2273 &bf->bf_lastbf->list);
2218 ath_tx_txqaddbuf(sc, txq, &bf_head); 2274 ath_tx_txqaddbuf(sc, txq, &bf_head);
2219 } else if (sc->sc_flags & SC_OP_TXAGGR) 2275 } else if (sc->sc_flags & SC_OP_TXAGGR)
2220 ath_txq_schedule(sc, txq); 2276 ath_txq_schedule(sc, txq);
2277
2221 spin_unlock_bh(&txq->axq_lock); 2278 spin_unlock_bh(&txq->axq_lock);
2222 } 2279 }
2223} 2280}
@@ -2285,6 +2342,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2285 } 2342 }
2286 2343
2287 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); 2344 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2345 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
2288 2346
2289 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 2347 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2290 error = ath_tx_edma_init(sc); 2348 error = ath_tx_edma_init(sc);
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index d07ff7f2fd92..c6a5fae634a0 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -283,6 +283,7 @@ struct ar9170 {
283 unsigned int mem_blocks; 283 unsigned int mem_blocks;
284 unsigned int mem_block_size; 284 unsigned int mem_block_size;
285 unsigned int rx_size; 285 unsigned int rx_size;
286 unsigned int tx_seq_table;
286 } fw; 287 } fw;
287 288
288 /* reset / stuck frames/queue detection */ 289 /* reset / stuck frames/queue detection */
@@ -533,7 +534,7 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
533void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); 534void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
534 535
535/* TX */ 536/* TX */
536int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 537void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
537void carl9170_tx_janitor(struct work_struct *work); 538void carl9170_tx_janitor(struct work_struct *work);
538void carl9170_tx_process_status(struct ar9170 *ar, 539void carl9170_tx_process_status(struct ar9170 *ar,
539 const struct carl9170_rsp *cmd); 540 const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 546b4e4ec5ea..9517ede9e2df 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -150,6 +150,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
150 const struct carl9170fw_otus_desc *otus_desc; 150 const struct carl9170fw_otus_desc *otus_desc;
151 const struct carl9170fw_chk_desc *chk_desc; 151 const struct carl9170fw_chk_desc *chk_desc;
152 const struct carl9170fw_last_desc *last_desc; 152 const struct carl9170fw_last_desc *last_desc;
153 const struct carl9170fw_txsq_desc *txsq_desc;
153 154
154 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, 155 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
155 sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); 156 sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -264,6 +265,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
264 FIF_PROMISC_IN_BSS; 265 FIF_PROMISC_IN_BSS;
265 } 266 }
266 267
268 if (SUPP(CARL9170FW_WOL))
269 device_set_wakeup_enable(&ar->udev->dev, true);
270
267 ar->fw.vif_num = otus_desc->vif_num; 271 ar->fw.vif_num = otus_desc->vif_num;
268 ar->fw.cmd_bufs = otus_desc->cmd_bufs; 272 ar->fw.cmd_bufs = otus_desc->cmd_bufs;
269 ar->fw.address = le32_to_cpu(otus_desc->fw_address); 273 ar->fw.address = le32_to_cpu(otus_desc->fw_address);
@@ -296,6 +300,17 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
296 } 300 }
297 } 301 }
298 302
303 txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
304 sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
305
306 if (txsq_desc) {
307 ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
308 if (!valid_cpu_addr(ar->fw.tx_seq_table))
309 return -EINVAL;
310 } else {
311 ar->fw.tx_seq_table = 0;
312 }
313
299#undef SUPPORTED 314#undef SUPPORTED
300 return 0; 315 return 0;
301} 316}
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 3680dfc70f46..30449d21b762 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -167,6 +167,7 @@ struct carl9170_rx_filter_cmd {
167#define CARL9170_RX_FILTER_CTL_BACKR 0x20 167#define CARL9170_RX_FILTER_CTL_BACKR 0x20
168#define CARL9170_RX_FILTER_MGMT 0x40 168#define CARL9170_RX_FILTER_MGMT 0x40
169#define CARL9170_RX_FILTER_DATA 0x80 169#define CARL9170_RX_FILTER_DATA 0x80
170#define CARL9170_RX_FILTER_EVERYTHING (~0)
170 171
171struct carl9170_bcn_ctrl_cmd { 172struct carl9170_bcn_ctrl_cmd {
172 __le32 vif_id; 173 __le32 vif_id;
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 71f3821f6058..921066822dd5 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -69,6 +69,9 @@ enum carl9170fw_feature_list {
69 /* Firmware RX filter | CARL9170_CMD_RX_FILTER */ 69 /* Firmware RX filter | CARL9170_CMD_RX_FILTER */
70 CARL9170FW_RX_FILTER, 70 CARL9170FW_RX_FILTER,
71 71
72 /* Wake up on WLAN */
73 CARL9170FW_WOL,
74
72 /* KEEP LAST */ 75 /* KEEP LAST */
73 __CARL9170FW_FEATURE_NUM 76 __CARL9170FW_FEATURE_NUM
74}; 77};
@@ -78,6 +81,7 @@ enum carl9170fw_feature_list {
78#define FIX_MAGIC "FIX\0" 81#define FIX_MAGIC "FIX\0"
79#define DBG_MAGIC "DBG\0" 82#define DBG_MAGIC "DBG\0"
80#define CHK_MAGIC "CHK\0" 83#define CHK_MAGIC "CHK\0"
84#define TXSQ_MAGIC "TXSQ"
81#define LAST_MAGIC "LAST" 85#define LAST_MAGIC "LAST"
82 86
83#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31) 87#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
@@ -88,8 +92,10 @@ enum carl9170fw_feature_list {
88#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1) 92#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
89#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10) 93#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
90 94
95#define CARL9170FW_MAGIC_SIZE 4
96
91struct carl9170fw_desc_head { 97struct carl9170fw_desc_head {
92 u8 magic[4]; 98 u8 magic[CARL9170FW_MAGIC_SIZE];
93 __le16 length; 99 __le16 length;
94 u8 min_ver; 100 u8 min_ver;
95 u8 cur_ver; 101 u8 cur_ver;
@@ -170,6 +176,16 @@ struct carl9170fw_chk_desc {
170#define CARL9170FW_CHK_DESC_SIZE \ 176#define CARL9170FW_CHK_DESC_SIZE \
171 (sizeof(struct carl9170fw_chk_desc)) 177 (sizeof(struct carl9170fw_chk_desc))
172 178
179#define CARL9170FW_TXSQ_DESC_MIN_VER 1
180#define CARL9170FW_TXSQ_DESC_CUR_VER 1
181struct carl9170fw_txsq_desc {
182 struct carl9170fw_desc_head head;
183
184 __le32 seq_table_addr;
185} __packed;
186#define CARL9170FW_TXSQ_DESC_SIZE \
187 (sizeof(struct carl9170fw_txsq_desc))
188
173#define CARL9170FW_LAST_DESC_MIN_VER 1 189#define CARL9170FW_LAST_DESC_MIN_VER 1
174#define CARL9170FW_LAST_DESC_CUR_VER 2 190#define CARL9170FW_LAST_DESC_CUR_VER 2
175struct carl9170fw_last_desc { 191struct carl9170fw_last_desc {
@@ -189,8 +205,8 @@ struct carl9170fw_last_desc {
189 } 205 }
190 206
191static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, 207static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
192 u8 magic[4], __le16 length, 208 u8 magic[CARL9170FW_MAGIC_SIZE],
193 u8 min_ver, u8 cur_ver) 209 __le16 length, u8 min_ver, u8 cur_ver)
194{ 210{
195 head->magic[0] = magic[0]; 211 head->magic[0] = magic[0];
196 head->magic[1] = magic[1]; 212 head->magic[1] = magic[1];
@@ -204,7 +220,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
204 220
205#define carl9170fw_for_each_hdr(desc, fw_desc) \ 221#define carl9170fw_for_each_hdr(desc, fw_desc) \
206 for (desc = fw_desc; \ 222 for (desc = fw_desc; \
207 memcmp(desc->magic, LAST_MAGIC, 4) && \ 223 memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) && \
208 le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \ 224 le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \
209 le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \ 225 le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \
210 desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length))) 226 desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
@@ -218,8 +234,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature)
218} 234}
219 235
220static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head, 236static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
221 const u8 descid[4], u16 min_len, 237 const u8 descid[CARL9170FW_MAGIC_SIZE],
222 u8 compatible_revision) 238 u16 min_len, u8 compatible_revision)
223{ 239{
224 if (descid[0] == head->magic[0] && descid[1] == head->magic[1] && 240 if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
225 descid[2] == head->magic[2] && descid[3] == head->magic[3] && 241 descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index e85df6edfed3..4e30762dd903 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -463,6 +463,8 @@
463 463
464#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010) 464#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010)
465#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014) 465#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014)
466#define AR9170_PWR_PLL_ADDAC_DIV_S 2
467#define AR9170_PWR_PLL_ADDAC_DIV 0xffc
466#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020) 468#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020)
467 469
468/* Faraday USB Controller */ 470/* Faraday USB Controller */
@@ -471,6 +473,9 @@
471#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000) 473#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000)
472#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0) 474#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0)
473#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2) 475#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2)
476#define AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND BIT(3)
477#define AR9170_USB_MAIN_CTRL_RESET BIT(4)
478#define AR9170_USB_MAIN_CTRL_CHIP_ENABLE BIT(5)
474#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6) 479#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6)
475 480
476#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001) 481#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001)
@@ -499,6 +504,13 @@
499#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020) 504#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020)
500 505
501#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021) 506#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021)
507#define AR9170_USB_INTR_SRC0_SETUP BIT(0)
508#define AR9170_USB_INTR_SRC0_IN BIT(1)
509#define AR9170_USB_INTR_SRC0_OUT BIT(2)
510#define AR9170_USB_INTR_SRC0_FAIL BIT(3) /* ??? */
511#define AR9170_USB_INTR_SRC0_END BIT(4) /* ??? */
512#define AR9170_USB_INTR_SRC0_ABORT BIT(7)
513
502#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022) 514#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022)
503#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023) 515#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023)
504#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024) 516#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024)
@@ -506,6 +518,15 @@
506#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026) 518#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026)
507#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027) 519#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027)
508#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028) 520#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028)
521#define AR9170_USB_INTR_SRC7_USB_RESET BIT(1)
522#define AR9170_USB_INTR_SRC7_USB_SUSPEND BIT(2)
523#define AR9170_USB_INTR_SRC7_USB_RESUME BIT(3)
524#define AR9170_USB_INTR_SRC7_ISO_SEQ_ERR BIT(4)
525#define AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT BIT(5)
526#define AR9170_USB_INTR_SRC7_TX0BYTE BIT(6)
527#define AR9170_USB_INTR_SRC7_RX0BYTE BIT(7)
528
529#define AR9170_USB_REG_IDLE_COUNT (AR9170_USB_REG_BASE + 0x02f)
509 530
510#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030) 531#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030)
511#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030) 532#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030)
@@ -581,6 +602,10 @@
581 602
582#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110) 603#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
583#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114) 604#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
605
606#define AR9170_USB_REG_WAKE_UP (AR9170_USB_REG_BASE + 0x120)
607#define AR9170_USB_WAKE_UP_WAKE BIT(0)
608
584#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0) 609#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0)
585#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1)) 610#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1))
586 611
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 870df8c42622..ede3d7e5a048 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -662,6 +662,13 @@ init:
662 goto unlock; 662 goto unlock;
663 } 663 }
664 664
665 if (ar->fw.tx_seq_table) {
666 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
667 0);
668 if (err)
669 goto unlock;
670 }
671
665unlock: 672unlock:
666 if (err && (vif_id >= 0)) { 673 if (err && (vif_id >= 0)) {
667 vif_priv->active = false; 674 vif_priv->active = false;
@@ -1279,7 +1286,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1279 struct ieee80211_vif *vif, 1286 struct ieee80211_vif *vif,
1280 enum ieee80211_ampdu_mlme_action action, 1287 enum ieee80211_ampdu_mlme_action action,
1281 struct ieee80211_sta *sta, 1288 struct ieee80211_sta *sta,
1282 u16 tid, u16 *ssn) 1289 u16 tid, u16 *ssn, u8 buf_size)
1283{ 1290{
1284 struct ar9170 *ar = hw->priv; 1291 struct ar9170 *ar = hw->priv;
1285 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; 1292 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6cc58e052d10..0ef70b6fc512 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -862,6 +862,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
862 if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)) 862 if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
863 txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB; 863 txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
864 864
865 if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
866 txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
867
865 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) 868 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
866 txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF; 869 txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
867 870
@@ -1336,7 +1339,7 @@ err_unlock_rcu:
1336 return false; 1339 return false;
1337} 1340}
1338 1341
1339int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1342void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1340{ 1343{
1341 struct ar9170 *ar = hw->priv; 1344 struct ar9170 *ar = hw->priv;
1342 struct ieee80211_tx_info *info; 1345 struct ieee80211_tx_info *info;
@@ -1370,12 +1373,11 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1370 } 1373 }
1371 1374
1372 carl9170_tx(ar); 1375 carl9170_tx(ar);
1373 return NETDEV_TX_OK; 1376 return;
1374 1377
1375err_free: 1378err_free:
1376 ar->tx_dropped++; 1379 ar->tx_dropped++;
1377 dev_kfree_skb_any(skb); 1380 dev_kfree_skb_any(skb);
1378 return NETDEV_TX_OK;
1379} 1381}
1380 1382
1381void carl9170_tx_scheduler(struct ar9170 *ar) 1383void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index ee0f84f2a2f6..15095c035169 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
1#ifndef __CARL9170_SHARED_VERSION_H 1#ifndef __CARL9170_SHARED_VERSION_H
2#define __CARL9170_SHARED_VERSION_H 2#define __CARL9170_SHARED_VERSION_H
3#define CARL9170FW_VERSION_YEAR 10 3#define CARL9170FW_VERSION_YEAR 11
4#define CARL9170FW_VERSION_MONTH 10 4#define CARL9170FW_VERSION_MONTH 1
5#define CARL9170FW_VERSION_DAY 29 5#define CARL9170FW_VERSION_DAY 22
6#define CARL9170FW_VERSION_GIT "1.9.0" 6#define CARL9170FW_VERSION_GIT "1.9.2"
7#endif /* __CARL9170_SHARED_VERSION_H */ 7#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index 24d63b583b6b..9e1324b67e08 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -251,7 +251,7 @@ struct carl9170_tx_superdesc {
251 u8 ampdu_commit_factor:1; 251 u8 ampdu_commit_factor:1;
252 u8 ampdu_unused_bit:1; 252 u8 ampdu_unused_bit:1;
253 u8 queue:2; 253 u8 queue:2;
254 u8 reserved:1; 254 u8 assign_seq:1;
255 u8 vif_id:3; 255 u8 vif_id:3;
256 u8 fill_in_tsf:1; 256 u8 fill_in_tsf:1;
257 u8 cab:1; 257 u8 cab:1;
@@ -299,6 +299,7 @@ struct _ar9170_tx_hwdesc {
299 299
300#define CARL9170_TX_SUPER_MISC_QUEUE 0x3 300#define CARL9170_TX_SUPER_MISC_QUEUE 0x3
301#define CARL9170_TX_SUPER_MISC_QUEUE_S 0 301#define CARL9170_TX_SUPER_MISC_QUEUE_S 0
302#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ 0x4
302#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38 303#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38
303#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3 304#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3
304#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40 305#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40
@@ -413,6 +414,23 @@ enum ar9170_txq {
413 __AR9170_NUM_TXQ, 414 __AR9170_NUM_TXQ,
414}; 415};
415 416
417/*
 418 * This is a workaround for several undocumented bugs.
419 * Don't mess with the QoS/AC <-> HW Queue map, if you don't
420 * know what you are doing.
421 *
422 * Known problems [hardware]:
423 * * The MAC does not aggregate frames on anything other
424 * than the first HW queue.
425 * * when an AMPDU is placed [in the first hw queue] and
426 * additional frames are already queued on a different
427 * hw queue, the MAC will ALWAYS freeze.
428 *
429 * In a nutshell: The hardware can either do QoS or
430 * Aggregation but not both at the same time. As a
431 * result, this makes the device pretty much useless
432 * for any serious 802.11n setup.
433 */
416static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 }; 434static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
417 435
418#define AR9170_TXQ_DEPTH 32 436#define AR9170_TXQ_DEPTH 32
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 5d465e5fcf24..37b8e115375a 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -58,8 +58,11 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
58 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); 58 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
59 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0); 59 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
60 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); 60 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
61 if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) 61 if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
62 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0); 62 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
63 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
64 AR_KEYTABLE_TYPE_CLR);
65 }
63 66
64 } 67 }
65 68
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 2b14775e6bc6..f828f294ba89 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -158,6 +158,13 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
158 } 158 }
159} 159}
160 160
161bool ath_is_49ghz_allowed(u16 regdomain)
162{
163 /* possibly more */
164 return regdomain == MKK9_MKKC;
165}
166EXPORT_SYMBOL(ath_is_49ghz_allowed);
167
161/* Frequency is one where radar detection is required */ 168/* Frequency is one where radar detection is required */
162static bool ath_is_radar_freq(u16 center_freq) 169static bool ath_is_radar_freq(u16 center_freq)
163{ 170{
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 345dd9721b41..172f63f671cf 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -250,6 +250,7 @@ enum CountryCode {
250}; 250};
251 251
252bool ath_is_world_regd(struct ath_regulatory *reg); 252bool ath_is_world_regd(struct ath_regulatory *reg);
253bool ath_is_49ghz_allowed(u16 redomain);
253int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, 254int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
254 int (*reg_notifier)(struct wiphy *wiphy, 255 int (*reg_notifier)(struct wiphy *wiphy,
255 struct regulatory_request *request)); 256 struct regulatory_request *request));
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 47033f6a1c2b..480595f04411 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -92,7 +92,7 @@ config B43_PHY_N
92 ---help--- 92 ---help---
93 Support for the N-PHY. 93 Support for the N-PHY.
94 94
95 This enables support for devices with N-PHY revision up to 2. 95 This enables support for devices with N-PHY.
96 96
97 Say N if you expect high stability and performance. Saying Y will not 97 Say N if you expect high stability and performance. Saying Y will not
98 affect other devices support and may provide support for basic needs. 98 affect other devices support and may provide support for basic needs.
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 22bc9f17f634..57eb5b649730 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3203,7 +3203,7 @@ static void b43_tx_work(struct work_struct *work)
3203 mutex_unlock(&wl->mutex); 3203 mutex_unlock(&wl->mutex);
3204} 3204}
3205 3205
3206static int b43_op_tx(struct ieee80211_hw *hw, 3206static void b43_op_tx(struct ieee80211_hw *hw,
3207 struct sk_buff *skb) 3207 struct sk_buff *skb)
3208{ 3208{
3209 struct b43_wl *wl = hw_to_b43_wl(hw); 3209 struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3211,14 +3211,12 @@ static int b43_op_tx(struct ieee80211_hw *hw,
3211 if (unlikely(skb->len < 2 + 2 + 6)) { 3211 if (unlikely(skb->len < 2 + 2 + 6)) {
3212 /* Too short, this can't be a valid frame. */ 3212 /* Too short, this can't be a valid frame. */
3213 dev_kfree_skb_any(skb); 3213 dev_kfree_skb_any(skb);
3214 return NETDEV_TX_OK; 3214 return;
3215 } 3215 }
3216 B43_WARN_ON(skb_shinfo(skb)->nr_frags); 3216 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
3217 3217
3218 skb_queue_tail(&wl->tx_queue, skb); 3218 skb_queue_tail(&wl->tx_queue, skb);
3219 ieee80211_queue_work(wl->hw, &wl->tx_work); 3219 ieee80211_queue_work(wl->hw, &wl->tx_work);
3220
3221 return NETDEV_TX_OK;
3222} 3220}
3223 3221
3224static void b43_qos_params_upload(struct b43_wldev *dev, 3222static void b43_qos_params_upload(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index ab81ed8b19d7..8a00f9a95dbb 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -430,9 +430,9 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
430 bool workaround = false; 430 bool workaround = false;
431 431
432 if (sprom->revision < 4) 432 if (sprom->revision < 4)
433 workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM || 433 workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM &&
434 binfo->type != 0x46D || 434 binfo->type == 0x46D &&
435 binfo->rev < 0x41); 435 binfo->rev >= 0x41);
436 else 436 else
437 workaround = 437 workaround =
438 !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS); 438 !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
@@ -1168,23 +1168,98 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
1168static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) 1168static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
1169{ 1169{
1170 struct b43_phy_n *nphy = dev->phy.n; 1170 struct b43_phy_n *nphy = dev->phy.n;
1171 struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
1172
1173 /* PHY rev 0, 1, 2 */
1171 u8 i, j; 1174 u8 i, j;
1172 u8 code; 1175 u8 code;
1173 u16 tmp; 1176 u16 tmp;
1177 u8 rfseq_events[3] = { 6, 8, 7 };
1178 u8 rfseq_delays[3] = { 10, 30, 1 };
1174 1179
1175 /* TODO: for PHY >= 3 1180 /* PHY rev >= 3 */
1176 s8 *lna1_gain, *lna2_gain; 1181 bool ghz5;
1177 u8 *gain_db, *gain_bits; 1182 bool ext_lna;
1178 u16 *rfseq_init; 1183 u16 rssi_gain;
1184 struct nphy_gain_ctl_workaround_entry *e;
1179 u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; 1185 u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
1180 u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; 1186 u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
1181 */
1182
1183 u8 rfseq_events[3] = { 6, 8, 7 };
1184 u8 rfseq_delays[3] = { 10, 30, 1 };
1185 1187
1186 if (dev->phy.rev >= 3) { 1188 if (dev->phy.rev >= 3) {
1187 /* TODO */ 1189 /* Prepare values */
1190 ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
1191 & B43_NPHY_BANDCTL_5GHZ;
1192 ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
1193 e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
1194 if (ghz5 && dev->phy.rev >= 5)
1195 rssi_gain = 0x90;
1196 else
1197 rssi_gain = 0x50;
1198
1199 b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
1200
1201 /* Set Clip 2 detect */
1202 b43_phy_set(dev, B43_NPHY_C1_CGAINI,
1203 B43_NPHY_C1_CGAINI_CL2DETECT);
1204 b43_phy_set(dev, B43_NPHY_C2_CGAINI,
1205 B43_NPHY_C2_CGAINI_CL2DETECT);
1206
1207 b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
1208 0x17);
1209 b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
1210 0x17);
1211 b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
1212 b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
1213 b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
1214 b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
1215 b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
1216 rssi_gain);
1217 b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
1218 rssi_gain);
1219 b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
1220 0x17);
1221 b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
1222 0x17);
1223 b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
1224 b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
1225
1226 b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
1227 b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
1228 b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
1229 b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
1230 b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
1231 b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
1232 b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
1233 b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
1234 b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
1235 b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
1236 b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
1237 b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
1238
1239 b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
1240 b43_phy_write(dev, 0x2A7, e->init_gain);
1241 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
1242 e->rfseq_init);
1243 b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
1244
1245 /* TODO: check defines. Do not match variables names */
1246 b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
1247 b43_phy_write(dev, 0x2A9, e->cliphi_gain);
1248 b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
1249 b43_phy_write(dev, 0x2AB, e->clipmd_gain);
1250 b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
1251 b43_phy_write(dev, 0x2AD, e->cliplo_gain);
1252
1253 b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
1254 b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
1255 b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
1256 b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
1257 b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
1258 b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
1259 ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
1260 b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
1261 ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
1262 b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
1188 } else { 1263 } else {
1189 /* Set Clip 2 detect */ 1264 /* Set Clip 2 detect */
1190 b43_phy_set(dev, B43_NPHY_C1_CGAINI, 1265 b43_phy_set(dev, B43_NPHY_C1_CGAINI,
@@ -1281,17 +1356,17 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
1281 B43_NPHY_TABLE_DATALO, tmp); 1356 B43_NPHY_TABLE_DATALO, tmp);
1282 } 1357 }
1283 } 1358 }
1359 }
1284 1360
1285 b43_nphy_set_rf_sequence(dev, 5, 1361 b43_nphy_set_rf_sequence(dev, 5,
1286 rfseq_events, rfseq_delays, 3); 1362 rfseq_events, rfseq_delays, 3);
1287 b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, 1363 b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
1288 ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, 1364 ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
1289 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); 1365 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
1290 1366
1291 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 1367 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
1292 b43_phy_maskset(dev, B43_PHY_N(0xC5D), 1368 b43_phy_maskset(dev, B43_PHY_N(0xC5D),
1293 0xFF80, 4); 1369 0xFF80, 4);
1294 }
1295 } 1370 }
1296} 1371}
1297 1372
@@ -1308,6 +1383,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
1308 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; 1383 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
1309 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; 1384 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
1310 1385
1386 u16 tmp16;
1387 u32 tmp32;
1388
1311 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 1389 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
1312 b43_nphy_classifier(dev, 1, 0); 1390 b43_nphy_classifier(dev, 1, 0);
1313 else 1391 else
@@ -1320,7 +1398,82 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
1320 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); 1398 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
1321 1399
1322 if (dev->phy.rev >= 3) { 1400 if (dev->phy.rev >= 3) {
1401 tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
1402 tmp32 &= 0xffffff;
1403 b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
1404
1405 b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
1406 b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
1407 b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
1408 b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
1409 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
1410 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
1411
1412 b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
1413 b43_phy_write(dev, 0x2AE, 0x000C);
1414
1323 /* TODO */ 1415 /* TODO */
1416
1417 tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
1418 0x2 : 0x9C40;
1419 b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
1420
1421 b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
1422
1423 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
1424 b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
1425
1426 b43_nphy_gain_ctrl_workarounds(dev);
1427
1428 b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
1429 b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
1430
1431 /* TODO */
1432
1433 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
1434 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
1435 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
1436 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
1437 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
1438 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
1439 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
1440 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
1441 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
1442 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
1443
1444 /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
1445
1446 if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR &&
1447 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
1448 (bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR &&
1449 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
1450 tmp32 = 0x00088888;
1451 else
1452 tmp32 = 0x88888888;
1453 b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
1454 b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
1455 b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
1456
1457 if (dev->phy.rev == 4 &&
1458 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1459 b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
1460 0x70);
1461 b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
1462 0x70);
1463 }
1464
1465 b43_phy_write(dev, 0x224, 0x039C);
1466 b43_phy_write(dev, 0x225, 0x0357);
1467 b43_phy_write(dev, 0x226, 0x0317);
1468 b43_phy_write(dev, 0x227, 0x02D7);
1469 b43_phy_write(dev, 0x228, 0x039C);
1470 b43_phy_write(dev, 0x229, 0x0357);
1471 b43_phy_write(dev, 0x22A, 0x0317);
1472 b43_phy_write(dev, 0x22B, 0x02D7);
1473 b43_phy_write(dev, 0x22C, 0x039C);
1474 b43_phy_write(dev, 0x22D, 0x0357);
1475 b43_phy_write(dev, 0x22E, 0x0317);
1476 b43_phy_write(dev, 0x22F, 0x02D7);
1324 } else { 1477 } else {
1325 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && 1478 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
1326 nphy->band5g_pwrgain) { 1479 nphy->band5g_pwrgain) {
@@ -2128,7 +2281,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
2128 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); 2281 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
2129 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); 2282 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
2130 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); 2283 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
2131 } else if (dev->phy.rev == 2) { 2284 } else {
2132 save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); 2285 save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
2133 save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); 2286 save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
2134 save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); 2287 save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
@@ -2179,7 +2332,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
2179 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); 2332 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
2180 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); 2333 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
2181 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); 2334 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
2182 } else if (dev->phy.rev == 2) { 2335 } else {
2183 b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); 2336 b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
2184 b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); 2337 b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
2185 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]); 2338 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]);
@@ -3878,10 +4031,14 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
3878 } 4031 }
3879} 4032}
3880 4033
4034/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
3881static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) 4035static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
3882{ 4036{
3883 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 4037 u16 val = on ? 0 : 0x7FFF;
3884 on ? 0 : 0x7FFF); 4038
4039 if (dev->phy.rev >= 3)
4040 b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, val);
4041 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, val);
3885} 4042}
3886 4043
3887static int b43_nphy_op_switch_channel(struct b43_wldev *dev, 4044static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index dc8ef09a8552..2de483b3d3ba 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1097,6 +1097,1080 @@ static const u32 b43_ntab_tmap[] = {
1097 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1097 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1098}; 1098};
1099 1099
1100/* static tables, PHY revision >= 3 */
1101static const u32 b43_ntab_framestruct_r3[] = {
1102 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
1103 0x09804506, 0x00100030, 0x09804507, 0x00100030,
1104 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1105 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1106 0x08004a0c, 0x00100004, 0x01000a0d, 0x00100024,
1107 0x0980450e, 0x00100034, 0x0980450f, 0x00100034,
1108 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1109 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1110 0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
1111 0x1980c506, 0x00100030, 0x21810506, 0x00100030,
1112 0x21810506, 0x00100030, 0x01800504, 0x00100030,
1113 0x11808505, 0x00100030, 0x29814507, 0x01100030,
1114 0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
1115 0x21810506, 0x00100030, 0x21810506, 0x00100030,
1116 0x29814507, 0x01100030, 0x00000000, 0x00000000,
1117 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1118 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
1119 0x1980c50e, 0x00100038, 0x2181050e, 0x00100038,
1120 0x2181050e, 0x00100038, 0x0180050c, 0x00100038,
1121 0x1180850d, 0x00100038, 0x2981450f, 0x01100038,
1122 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
1123 0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
1124 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
1125 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1126 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
1127 0x1980c506, 0x00100030, 0x1980c506, 0x00100030,
1128 0x11808504, 0x00100030, 0x3981ca05, 0x00100030,
1129 0x29814507, 0x01100030, 0x00000000, 0x00000000,
1130 0x10008a04, 0x00100000, 0x3981ca05, 0x00100030,
1131 0x1980c506, 0x00100030, 0x29814507, 0x01100030,
1132 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1133 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1134 0x08004a0c, 0x00100008, 0x01000a0d, 0x00100028,
1135 0x1980c50e, 0x00100038, 0x1980c50e, 0x00100038,
1136 0x1180850c, 0x00100038, 0x3981ca0d, 0x00100038,
1137 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
1138 0x10008a0c, 0x00100008, 0x3981ca0d, 0x00100038,
1139 0x1980c50e, 0x00100038, 0x2981450f, 0x01100038,
1140 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1141 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1142 0x40021404, 0x00100000, 0x02001405, 0x00100040,
1143 0x0b004a06, 0x01900060, 0x13008a06, 0x01900060,
1144 0x13008a06, 0x01900060, 0x43020a04, 0x00100060,
1145 0x1b00ca05, 0x00100060, 0x23010a07, 0x01500060,
1146 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
1147 0x13008a06, 0x01900060, 0x13008a06, 0x01900060,
1148 0x23010a07, 0x01500060, 0x00000000, 0x00000000,
1149 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1150 0x4002140c, 0x00100010, 0x0200140d, 0x00100050,
1151 0x0b004a0e, 0x01900070, 0x13008a0e, 0x01900070,
1152 0x13008a0e, 0x01900070, 0x43020a0c, 0x00100070,
1153 0x1b00ca0d, 0x00100070, 0x23010a0f, 0x01500070,
1154 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
1155 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
1156 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
1157 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1158 0x50029404, 0x00100000, 0x32019405, 0x00100040,
1159 0x0b004a06, 0x01900060, 0x0b004a06, 0x01900060,
1160 0x5b02ca04, 0x00100060, 0x3b01d405, 0x00100060,
1161 0x23010a07, 0x01500060, 0x00000000, 0x00000000,
1162 0x5802d404, 0x00100000, 0x3b01d405, 0x00100060,
1163 0x0b004a06, 0x01900060, 0x23010a07, 0x01500060,
1164 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1165 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1166 0x5002940c, 0x00100010, 0x3201940d, 0x00100050,
1167 0x0b004a0e, 0x01900070, 0x0b004a0e, 0x01900070,
1168 0x5b02ca0c, 0x00100070, 0x3b01d40d, 0x00100070,
1169 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
1170 0x5802d40c, 0x00100010, 0x3b01d40d, 0x00100070,
1171 0x0b004a0e, 0x01900070, 0x23010a0f, 0x01500070,
1172 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1173 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1174 0x40021404, 0x000f4800, 0x62031405, 0x00100040,
1175 0x53028a06, 0x01900060, 0x53028a07, 0x01900060,
1176 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1177 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1178 0x4002140c, 0x000f4808, 0x6203140d, 0x00100048,
1179 0x53028a0e, 0x01900068, 0x53028a0f, 0x01900068,
1180 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1182 0x00000a0c, 0x00100004, 0x11008a0d, 0x00100024,
1183 0x1980c50e, 0x00100034, 0x2181050e, 0x00100034,
1184 0x2181050e, 0x00100034, 0x0180050c, 0x00100038,
1185 0x1180850d, 0x00100038, 0x1181850d, 0x00100038,
1186 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
1187 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1188 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1189 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1190 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
1191 0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
1192 0x1181850d, 0x00100038, 0x2981450f, 0x01100038,
1193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1197 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1198 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
1199 0x0180c506, 0x00100030, 0x0180c506, 0x00100030,
1200 0x2180c50c, 0x00100030, 0x49820a0d, 0x0016a130,
1201 0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030,
1202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1206 0x2000ca0c, 0x00100000, 0x49820a0d, 0x0016a130,
1207 0x1980c50e, 0x00100030, 0x41824a0d, 0x0016a130,
1208 0x2981450f, 0x01100030, 0x00000000, 0x00000000,
1209 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1210 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1211 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1212 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1214 0x4002140c, 0x00100008, 0x0200140d, 0x00100048,
1215 0x0b004a0e, 0x01900068, 0x13008a0e, 0x01900068,
1216 0x13008a0e, 0x01900068, 0x43020a0c, 0x00100070,
1217 0x1b00ca0d, 0x00100070, 0x1b014a0d, 0x00100070,
1218 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
1219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1221 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1222 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
1223 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
1224 0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070,
1225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1230 0x50029404, 0x00100000, 0x32019405, 0x00100040,
1231 0x03004a06, 0x01900060, 0x03004a06, 0x01900060,
1232 0x6b030a0c, 0x00100060, 0x4b02140d, 0x0016a160,
1233 0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060,
1234 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1235 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1236 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1238 0x6b03140c, 0x00100060, 0x4b02140d, 0x0016a160,
1239 0x0b004a0e, 0x01900060, 0x4302540d, 0x0016a160,
1240 0x23010a0f, 0x01500060, 0x00000000, 0x00000000,
1241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1246 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
1247 0x53028a06, 0x01900060, 0x5b02ca06, 0x01900060,
1248 0x5b02ca06, 0x01900060, 0x43020a04, 0x00100060,
1249 0x1b00ca05, 0x00100060, 0x53028a07, 0x0190c060,
1250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1254 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
1255 0x53028a0e, 0x01900070, 0x5b02ca0e, 0x01900070,
1256 0x5b02ca0e, 0x01900070, 0x43020a0c, 0x00100070,
1257 0x1b00ca0d, 0x00100070, 0x53028a0f, 0x0190c070,
1258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1259 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1260 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1261 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1262 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
1263 0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060,
1264 0x53028a07, 0x0190c060, 0x00000000, 0x00000000,
1265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1270 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
1271 0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070,
1272 0x53028a0f, 0x0190c070, 0x00000000, 0x00000000,
1273 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1274 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1275 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1286 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1287 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1288 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1289 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1299 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1300 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1308 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1310};
1311
1312static const u16 b43_ntab_pilot_r3[] = {
1313 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08,
1314 0xff08, 0xff08, 0x80d5, 0x80d5, 0x80d5, 0x80d5,
1315 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0xff0a, 0xff82,
1316 0xffa0, 0xff28, 0xffff, 0xffff, 0xffff, 0xffff,
1317 0xff82, 0xffa0, 0xff28, 0xff0a, 0xffff, 0xffff,
1318 0xffff, 0xffff, 0xf83f, 0xfa1f, 0xfa97, 0xfab5,
1319 0xf2bd, 0xf0bf, 0xffff, 0xffff, 0xf017, 0xf815,
1320 0xf215, 0xf095, 0xf035, 0xf01d, 0xffff, 0xffff,
1321 0xff08, 0xff02, 0xff80, 0xff20, 0xff08, 0xff02,
1322 0xff80, 0xff20, 0xf01f, 0xf817, 0xfa15, 0xf295,
1323 0xf0b5, 0xf03d, 0xffff, 0xffff, 0xf82a, 0xfa0a,
1324 0xfa82, 0xfaa0, 0xf2a8, 0xf0aa, 0xffff, 0xffff,
1325 0xf002, 0xf800, 0xf200, 0xf080, 0xf020, 0xf008,
1326 0xffff, 0xffff, 0xf00a, 0xf802, 0xfa00, 0xf280,
1327 0xf0a0, 0xf028, 0xffff, 0xffff,
1328};
1329
1330static const u32 b43_ntab_tmap_r3[] = {
1331 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1332 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1333 0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
1334 0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
1335 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888,
1336 0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1337 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
1338 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
1339 0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
1340 0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
1341 0xf1111110, 0x11111111, 0x11f11111, 0x00011111,
1342 0x11110000, 0x1111f111, 0x11111111, 0x111111f1,
1343 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa,
1344 0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
1345 0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88,
1346 0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888,
1347 0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808,
1348 0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08,
1349 0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080,
1350 0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0,
1351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1363 0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
1364 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
1365 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1366 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
1367 0x22000000, 0x2222b222, 0x22222222, 0x222222b2,
1368 0xb2222220, 0x22222222, 0x22d22222, 0x00000222,
1369 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
1370 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
1371 0x33000000, 0x3333b333, 0x33333333, 0x333333b3,
1372 0xb3333330, 0x33333333, 0x33d33333, 0x00000333,
1373 0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
1374 0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
1375 0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
1376 0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
1377 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1378 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
1379 0x22222200, 0x2222f222, 0x22222222, 0x222222f2,
1380 0x22222222, 0x22222222, 0x22f22222, 0x00000222,
1381 0x11000000, 0x1111f111, 0x11111111, 0x11111111,
1382 0xf1111111, 0x11111111, 0x11f11111, 0x01111111,
1383 0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b,
1384 0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb,
1385 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
1386 0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa,
1387 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
1388 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa,
1389 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1390 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1391 0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb,
1392 0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999,
1393 0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88,
1394 0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a,
1395 0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b,
1396 0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909,
1397 0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08,
1398 0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a,
1399 0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090,
1400 0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090,
1401 0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080,
1402 0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0,
1403 0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
1404 0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
1405 0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
1406 0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
1407 0x33000000, 0x3333f333, 0x33333333, 0x333333f3,
1408 0xf3333330, 0x33333333, 0x33f33333, 0x00000333,
1409 0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
1410 0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
1411 0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
1412 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
1413 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1414 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1415 0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1416 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1417 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1418 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
1419 0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1420 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1421 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1422 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
1423 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
1424 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
1425 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
1426 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
1427 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1428 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1429 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1430 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1434 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1435 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1436 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1437 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1438 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1439 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1443};
1444
1445static const u32 b43_ntab_intlevel_r3[] = {
1446 0x00802070, 0x0671188d, 0x0a60192c, 0x0a300e46,
1447 0x00c1188d, 0x080024d2, 0x00000070,
1448};
1449
1450static const u32 b43_ntab_tdtrn_r3[] = {
1451 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
1452 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
1453 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
1454 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
1455 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
1456 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
1457 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
1458 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
1459 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
1460 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
1461 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
1462 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
1463 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
1464 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
1465 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
1466 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
1467 0xfa58fa58, 0xf895043b, 0xff4c09c0, 0xfbc6ffa8,
1468 0xfb84f384, 0x0798f6f9, 0x05760122, 0x058409f6,
1469 0x0b500000, 0x05b7f542, 0x08860432, 0x06ddfee7,
1470 0xfb84f384, 0xf9d90664, 0xf7e8025c, 0x00fff7bd,
1471 0x05a805a8, 0xf7bd00ff, 0x025cf7e8, 0x0664f9d9,
1472 0xf384fb84, 0xfee706dd, 0x04320886, 0xf54205b7,
1473 0x00000b50, 0x09f60584, 0x01220576, 0xf6f90798,
1474 0xf384fb84, 0xffa8fbc6, 0x09c0ff4c, 0x043bf895,
1475 0x02d402d4, 0x07de0270, 0xfc96079c, 0xf90afe94,
1476 0xfe00ff2c, 0x02d4065d, 0x092a0096, 0x0014fbb8,
1477 0xfd2cfd2c, 0x076afb3c, 0x0096f752, 0xf991fd87,
1478 0xfb2c0200, 0xfeb8f960, 0x08e0fc96, 0x049802a8,
1479 0xfd2cfd2c, 0x02a80498, 0xfc9608e0, 0xf960feb8,
1480 0x0200fb2c, 0xfd87f991, 0xf7520096, 0xfb3c076a,
1481 0xfd2cfd2c, 0xfbb80014, 0x0096092a, 0x065d02d4,
1482 0xff2cfe00, 0xfe94f90a, 0x079cfc96, 0x027007de,
1483 0x02d402d4, 0x027007de, 0x079cfc96, 0xfe94f90a,
1484 0xff2cfe00, 0x065d02d4, 0x0096092a, 0xfbb80014,
1485 0xfd2cfd2c, 0xfb3c076a, 0xf7520096, 0xfd87f991,
1486 0x0200fb2c, 0xf960feb8, 0xfc9608e0, 0x02a80498,
1487 0xfd2cfd2c, 0x049802a8, 0x08e0fc96, 0xfeb8f960,
1488 0xfb2c0200, 0xf991fd87, 0x0096f752, 0x076afb3c,
1489 0xfd2cfd2c, 0x0014fbb8, 0x092a0096, 0x02d4065d,
1490 0xfe00ff2c, 0xf90afe94, 0xfc96079c, 0x07de0270,
1491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1495 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1497 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1500 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1506 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1507 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1509 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1512 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1514 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1515 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
1516 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
1517 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
1518 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
1519 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
1520 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
1521 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
1522 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
1523 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
1524 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
1525 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
1526 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
1527 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
1528 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
1529 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
1530 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
1531 0x061c061c, 0xff30009d, 0xffb21141, 0xfd87fb54,
1532 0xf65dfe59, 0x02eef99e, 0x0166f03c, 0xfff809b6,
1533 0x000008a4, 0x000af42b, 0x00eff577, 0xfa840bf2,
1534 0xfc02ff51, 0x08260f67, 0xfff0036f, 0x0842f9c3,
1535 0x00000000, 0x063df7be, 0xfc910010, 0xf099f7da,
1536 0x00af03fe, 0xf40e057c, 0x0a89ff11, 0x0bd5fff6,
1537 0xf75c0000, 0xf64a0008, 0x0fc4fe9a, 0x0662fd12,
1538 0x01a709a3, 0x04ac0279, 0xeebf004e, 0xff6300d0,
1539 0xf9e4f9e4, 0x00d0ff63, 0x004eeebf, 0x027904ac,
1540 0x09a301a7, 0xfd120662, 0xfe9a0fc4, 0x0008f64a,
1541 0x0000f75c, 0xfff60bd5, 0xff110a89, 0x057cf40e,
1542 0x03fe00af, 0xf7daf099, 0x0010fc91, 0xf7be063d,
1543 0x00000000, 0xf9c30842, 0x036ffff0, 0x0f670826,
1544 0xff51fc02, 0x0bf2fa84, 0xf57700ef, 0xf42b000a,
1545 0x08a40000, 0x09b6fff8, 0xf03c0166, 0xf99e02ee,
1546 0xfe59f65d, 0xfb54fd87, 0x1141ffb2, 0x009dff30,
1547 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
1548 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
1549 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
1550 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
1551 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
1552 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
1553 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
1554 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
1555 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
1556 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
1557 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
1558 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
1559 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
1560 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
1561 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
1562 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
1563 0xfa58fa58, 0xf8f0fe00, 0x0448073d, 0xfdc9fe46,
1564 0xf9910258, 0x089d0407, 0xfd5cf71a, 0x02affde0,
1565 0x083e0496, 0xff5a0740, 0xff7afd97, 0x00fe01f1,
1566 0x0009082e, 0xfa94ff75, 0xfecdf8ea, 0xffb0f693,
1567 0xfd2cfa58, 0x0433ff16, 0xfba405dd, 0xfa610341,
1568 0x06a606cb, 0x0039fd2d, 0x0677fa97, 0x01fa05e0,
1569 0xf896003e, 0x075a068b, 0x012cfc3e, 0xfa23f98d,
1570 0xfc7cfd43, 0xff90fc0d, 0x01c10982, 0x00c601d6,
1571 0xfd2cfd2c, 0x01d600c6, 0x098201c1, 0xfc0dff90,
1572 0xfd43fc7c, 0xf98dfa23, 0xfc3e012c, 0x068b075a,
1573 0x003ef896, 0x05e001fa, 0xfa970677, 0xfd2d0039,
1574 0x06cb06a6, 0x0341fa61, 0x05ddfba4, 0xff160433,
1575 0xfa58fd2c, 0xf693ffb0, 0xf8eafecd, 0xff75fa94,
1576 0x082e0009, 0x01f100fe, 0xfd97ff7a, 0x0740ff5a,
1577 0x0496083e, 0xfde002af, 0xf71afd5c, 0x0407089d,
1578 0x0258f991, 0xfe46fdc9, 0x073d0448, 0xfe00f8f0,
1579 0xfd2cfd2c, 0xfce00500, 0xfc09fddc, 0xfe680157,
1580 0x04c70571, 0xfc3aff21, 0xfcd70228, 0x056d0277,
1581 0x0200fe00, 0x0022f927, 0xfe3c032b, 0xfc44ff3c,
1582 0x03e9fbdb, 0x04570313, 0x04c9ff5c, 0x000d03b8,
1583 0xfa580000, 0xfbe900d2, 0xf9d0fe0b, 0x0125fdf9,
1584 0x042501bf, 0x0328fa2b, 0xffa902f0, 0xfa250157,
1585 0x0200fe00, 0x03740438, 0xff0405fd, 0x030cfe52,
1586 0x0037fb39, 0xff6904c5, 0x04f8fd23, 0xfd31fc1b,
1587 0xfd2cfd2c, 0xfc1bfd31, 0xfd2304f8, 0x04c5ff69,
1588 0xfb390037, 0xfe52030c, 0x05fdff04, 0x04380374,
1589 0xfe000200, 0x0157fa25, 0x02f0ffa9, 0xfa2b0328,
1590 0x01bf0425, 0xfdf90125, 0xfe0bf9d0, 0x00d2fbe9,
1591 0x0000fa58, 0x03b8000d, 0xff5c04c9, 0x03130457,
1592 0xfbdb03e9, 0xff3cfc44, 0x032bfe3c, 0xf9270022,
1593 0xfe000200, 0x0277056d, 0x0228fcd7, 0xff21fc3a,
1594 0x057104c7, 0x0157fe68, 0xfddcfc09, 0x0500fce0,
1595 0xfd2cfd2c, 0x0500fce0, 0xfddcfc09, 0x0157fe68,
1596 0x057104c7, 0xff21fc3a, 0x0228fcd7, 0x0277056d,
1597 0xfe000200, 0xf9270022, 0x032bfe3c, 0xff3cfc44,
1598 0xfbdb03e9, 0x03130457, 0xff5c04c9, 0x03b8000d,
1599 0x0000fa58, 0x00d2fbe9, 0xfe0bf9d0, 0xfdf90125,
1600 0x01bf0425, 0xfa2b0328, 0x02f0ffa9, 0x0157fa25,
1601 0xfe000200, 0x04380374, 0x05fdff04, 0xfe52030c,
1602 0xfb390037, 0x04c5ff69, 0xfd2304f8, 0xfc1bfd31,
1603 0xfd2cfd2c, 0xfd31fc1b, 0x04f8fd23, 0xff6904c5,
1604 0x0037fb39, 0x030cfe52, 0xff0405fd, 0x03740438,
1605 0x0200fe00, 0xfa250157, 0xffa902f0, 0x0328fa2b,
1606 0x042501bf, 0x0125fdf9, 0xf9d0fe0b, 0xfbe900d2,
1607 0xfa580000, 0x000d03b8, 0x04c9ff5c, 0x04570313,
1608 0x03e9fbdb, 0xfc44ff3c, 0xfe3c032b, 0x0022f927,
1609 0x0200fe00, 0x056d0277, 0xfcd70228, 0xfc3aff21,
1610 0x04c70571, 0xfe680157, 0xfc09fddc, 0xfce00500,
1611 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
1612 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
1613 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
1614 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
1615 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
1616 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
1617 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
1618 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
1619 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
1620 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
1621 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
1622 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
1623 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
1624 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
1625 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
1626 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
1627};
1628
1629static const u32 b43_ntab_noisevar0_r3[] = {
1630 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1631 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1632 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1633 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1634 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1635 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1636 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1637 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1638 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1639 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1640 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1641 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1642 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1643 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1644 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1645 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1646 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1647 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1648 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1649 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1650 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1651 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1652 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1653 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1654 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1655 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1656 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1657 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1658 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1659 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1660 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1661 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1662 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1663 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1664 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1665 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1666 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1667 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1668 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1669 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1670 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1671 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1672 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1673 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1674 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1675 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1676 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1677 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1678 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1679 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1680 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1681 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1682 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1683 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1684 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1685 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1686 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1687 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1688 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1689 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1690 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1691 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1692 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1693 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1694};
1695
1696static const u32 b43_ntab_noisevar1_r3[] = {
1697 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1698 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1699 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1700 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1701 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1702 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1703 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1704 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1705 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1706 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1707 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1708 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1709 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1710 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1711 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1712 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1713 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1714 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1715 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1716 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1717 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1718 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1719 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1720 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1721 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1722 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1723 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1724 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1725 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1726 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1727 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1728 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1729 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1730 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1731 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1732 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1733 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1734 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1735 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1736 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1737 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1738 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1739 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1740 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1741 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1742 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1743 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1744 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1745 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1746 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1747 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1748 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1749 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1750 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1751 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1752 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1753 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1754 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1755 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1756 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1757 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1758 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1759 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1760 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1761};
1762
1763static const u16 b43_ntab_mcs_r3[] = {
1764 0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019,
1765 0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090,
1766 0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108,
1767 0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c,
1768 0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199,
1769 0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8,
1770 0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128,
1771 0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a,
1772 0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8,
1773 0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8,
1774 0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa,
1775 0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca,
1776 0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001,
1777 0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014,
1778 0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081,
1779 0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094,
1780 0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007,
1781 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
1782 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
1783 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
1784 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
1785 0x0007, 0x0007,
1786};
1787
1788static const u32 b43_ntab_tdi20a0_r3[] = {
1789 0x00091226, 0x000a1429, 0x000b56ad, 0x000c58b0,
1790 0x000d5ab3, 0x000e9cb6, 0x000f9eba, 0x0000c13d,
1791 0x00020301, 0x00030504, 0x00040708, 0x0005090b,
1792 0x00064b8e, 0x00095291, 0x000a5494, 0x000b9718,
1793 0x000c9927, 0x000d9b2a, 0x000edd2e, 0x000fdf31,
1794 0x000101b4, 0x000243b7, 0x000345bb, 0x000447be,
1795 0x00058982, 0x00068c05, 0x00099309, 0x000a950c,
1796 0x000bd78f, 0x000cd992, 0x000ddb96, 0x000f1d99,
1797 0x00005fa8, 0x0001422c, 0x0002842f, 0x00038632,
1798 0x00048835, 0x0005ca38, 0x0006ccbc, 0x0009d3bf,
1799 0x000b1603, 0x000c1806, 0x000d1a0a, 0x000e1c0d,
1800 0x000f5e10, 0x00008093, 0x00018297, 0x0002c49a,
1801 0x0003c680, 0x0004c880, 0x00060b00, 0x00070d00,
1802 0x00000000, 0x00000000, 0x00000000,
1803};
1804
1805static const u32 b43_ntab_tdi20a1_r3[] = {
1806 0x00014b26, 0x00028d29, 0x000393ad, 0x00049630,
1807 0x0005d833, 0x0006da36, 0x00099c3a, 0x000a9e3d,
1808 0x000bc081, 0x000cc284, 0x000dc488, 0x000f068b,
1809 0x0000488e, 0x00018b91, 0x0002d214, 0x0003d418,
1810 0x0004d6a7, 0x000618aa, 0x00071aae, 0x0009dcb1,
1811 0x000b1eb4, 0x000c0137, 0x000d033b, 0x000e053e,
1812 0x000f4702, 0x00008905, 0x00020c09, 0x0003128c,
1813 0x0004148f, 0x00051712, 0x00065916, 0x00091b19,
1814 0x000a1d28, 0x000b5f2c, 0x000c41af, 0x000d43b2,
1815 0x000e85b5, 0x000f87b8, 0x0000c9bc, 0x00024cbf,
1816 0x00035303, 0x00045506, 0x0005978a, 0x0006998d,
1817 0x00095b90, 0x000a5d93, 0x000b9f97, 0x000c821a,
1818 0x000d8400, 0x000ec600, 0x000fc800, 0x00010a00,
1819 0x00000000, 0x00000000, 0x00000000,
1820};
1821
1822static const u32 b43_ntab_tdi40a0_r3[] = {
1823 0x0011a346, 0x00136ccf, 0x0014f5d9, 0x001641e2,
1824 0x0017cb6b, 0x00195475, 0x001b2383, 0x001cad0c,
1825 0x001e7616, 0x0000821f, 0x00020ba8, 0x0003d4b2,
1826 0x00056447, 0x00072dd0, 0x0008b6da, 0x000a02e3,
1827 0x000b8c6c, 0x000d15f6, 0x0011e484, 0x0013ae0d,
1828 0x00153717, 0x00168320, 0x00180ca9, 0x00199633,
1829 0x001b6548, 0x001ceed1, 0x001eb7db, 0x0000c3e4,
1830 0x00024d6d, 0x000416f7, 0x0005a585, 0x00076f0f,
1831 0x0008f818, 0x000a4421, 0x000bcdab, 0x000d9734,
1832 0x00122649, 0x0013efd2, 0x001578dc, 0x0016c4e5,
1833 0x00184e6e, 0x001a17f8, 0x001ba686, 0x001d3010,
1834 0x001ef999, 0x00010522, 0x00028eac, 0x00045835,
1835 0x0005e74a, 0x0007b0d3, 0x00093a5d, 0x000a85e6,
1836 0x000c0f6f, 0x000dd8f9, 0x00126787, 0x00143111,
1837 0x0015ba9a, 0x00170623, 0x00188fad, 0x001a5936,
1838 0x001be84b, 0x001db1d4, 0x001f3b5e, 0x000146e7,
1839 0x00031070, 0x000499fa, 0x00062888, 0x0007f212,
1840 0x00097b9b, 0x000ac7a4, 0x000c50ae, 0x000e1a37,
1841 0x0012a94c, 0x001472d5, 0x0015fc5f, 0x00174868,
1842 0x0018d171, 0x001a9afb, 0x001c2989, 0x001df313,
1843 0x001f7c9c, 0x000188a5, 0x000351af, 0x0004db38,
1844 0x0006aa4d, 0x000833d7, 0x0009bd60, 0x000b0969,
1845 0x000c9273, 0x000e5bfc, 0x00132a8a, 0x0014b414,
1846 0x00163d9d, 0x001789a6, 0x001912b0, 0x001adc39,
1847 0x001c6bce, 0x001e34d8, 0x001fbe61, 0x0001ca6a,
1848 0x00039374, 0x00051cfd, 0x0006ec0b, 0x00087515,
1849 0x0009fe9e, 0x000b4aa7, 0x000cd3b1, 0x000e9d3a,
1850 0x00000000, 0x00000000,
1851};
1852
1853static const u32 b43_ntab_tdi40a1_r3[] = {
1854 0x001edb36, 0x000129ca, 0x0002b353, 0x00047cdd,
1855 0x0005c8e6, 0x000791ef, 0x00091bf9, 0x000aaa07,
1856 0x000c3391, 0x000dfd1a, 0x00120923, 0x0013d22d,
1857 0x00155c37, 0x0016eacb, 0x00187454, 0x001a3dde,
1858 0x001b89e7, 0x001d12f0, 0x001f1cfa, 0x00016b88,
1859 0x00033492, 0x0004be1b, 0x00060a24, 0x0007d32e,
1860 0x00095d38, 0x000aec4c, 0x000c7555, 0x000e3edf,
1861 0x00124ae8, 0x001413f1, 0x0015a37b, 0x00172c89,
1862 0x0018b593, 0x001a419c, 0x001bcb25, 0x001d942f,
1863 0x001f63b9, 0x0001ad4d, 0x00037657, 0x0004c260,
1864 0x00068be9, 0x000814f3, 0x0009a47c, 0x000b2d8a,
1865 0x000cb694, 0x000e429d, 0x00128c26, 0x001455b0,
1866 0x0015e4ba, 0x00176e4e, 0x0018f758, 0x001a8361,
1867 0x001c0cea, 0x001dd674, 0x001fa57d, 0x0001ee8b,
1868 0x0003b795, 0x0005039e, 0x0006cd27, 0x000856b1,
1869 0x0009e5c6, 0x000b6f4f, 0x000cf859, 0x000e8462,
1870 0x00130deb, 0x00149775, 0x00162603, 0x0017af8c,
1871 0x00193896, 0x001ac49f, 0x001c4e28, 0x001e17b2,
1872 0x0000a6c7, 0x00023050, 0x0003f9da, 0x00054563,
1873 0x00070eec, 0x00089876, 0x000a2704, 0x000bb08d,
1874 0x000d3a17, 0x001185a0, 0x00134f29, 0x0014d8b3,
1875 0x001667c8, 0x0017f151, 0x00197adb, 0x001b0664,
1876 0x001c8fed, 0x001e5977, 0x0000e805, 0x0002718f,
1877 0x00043b18, 0x000586a1, 0x0007502b, 0x0008d9b4,
1878 0x000a68c9, 0x000bf252, 0x000dbbdc, 0x0011c7e5,
1879 0x001390ee, 0x00151a78, 0x0016a906, 0x00183290,
1880 0x0019bc19, 0x001b4822, 0x001cd12c, 0x001e9ab5,
1881 0x00000000, 0x00000000,
1882};
1883
1884static const u32 b43_ntab_pilotlt_r3[] = {
1885 0x76540213, 0x62407351, 0x76543210, 0x76540213,
1886 0x76540213, 0x76430521,
1887};
1888
1889static const u32 b43_ntab_channelest_r3[] = {
1890 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1891 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1892 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1893 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1894 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1895 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1896 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1897 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1898 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1899 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1900 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1901 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1902 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1903 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1904 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1905 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1906 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1907 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1908 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1909 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1910 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1911 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1912 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1913 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1914};
1915
1916static const u8 b43_ntab_framelookup_r3[] = {
1917 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
1918 0x0a, 0x0c, 0x1c, 0x1c, 0x0b, 0x0d, 0x1e, 0x1e,
1919 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1a, 0x1a,
1920 0x0e, 0x10, 0x20, 0x28, 0x0f, 0x11, 0x22, 0x2a,
1921};
1922
1923static const u8 b43_ntab_estimatepowerlt0_r3[] = {
1924 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
1925 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
1926 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
1927 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
1928 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
1929 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
1930 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
1931 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
1932};
1933
1934static const u8 b43_ntab_estimatepowerlt1_r3[] = {
1935 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
1936 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
1937 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
1938 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
1939 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
1940 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
1941 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
1942 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
1943};
1944
1945static const u8 b43_ntab_adjustpower0_r3[] = {
1946 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1947 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1948 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1949 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1950 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1951 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1952 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1953 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1954 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1955 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1956 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1957 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1958 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1959 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1960 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1961 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1962};
1963
1964static const u8 b43_ntab_adjustpower1_r3[] = {
1965 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1966 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1967 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1968 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1969 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1970 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1971 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1972 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1973 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1974 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1975 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1976 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1977 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1978 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1979 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1980 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1981};
1982
1983static const u32 b43_ntab_gainctl0_r3[] = {
1984 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
1985 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
1986 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
1987 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
1988 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
1989 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
1990 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
1991 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
1992 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
1993 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
1994 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
1995 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
1996 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
1997 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
1998 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
1999 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
2000 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
2001 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
2002 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
2003 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
2004 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
2005 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
2006 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
2007 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
2008 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
2009 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
2010 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
2011 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
2012 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
2013 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
2014 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
2015 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
2016};
2017
2018static const u32 b43_ntab_gainctl1_r3[] = {
2019 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
2020 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
2021 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
2022 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
2023 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
2024 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
2025 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
2026 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
2027 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
2028 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
2029 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
2030 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
2031 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
2032 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
2033 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
2034 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
2035 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
2036 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
2037 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
2038 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
2039 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
2040 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
2041 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
2042 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
2043 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
2044 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
2045 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
2046 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
2047 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
2048 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
2049 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
2050 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
2051};
2052
2053static const u32 b43_ntab_iqlt0_r3[] = {
2054 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2055 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2056 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2057 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2058 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2059 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2060 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2061 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2062 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2063 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2064 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2065 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2066 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2067 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2068 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2069 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2070 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2071 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2072 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2073 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2074 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2075 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2076 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2077 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2078 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2079 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2080 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2081 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2082 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2083 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2084 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2085 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2086};
2087
2088static const u32 b43_ntab_iqlt1_r3[] = {
2089 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2090 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2091 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2092 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2093 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2094 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2095 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2096 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2097 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2098 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2099 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2100 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2101 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2102 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2103 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2104 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2105 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2106 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2107 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2108 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2109 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2110 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2111 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2112 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2113 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2114 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2115 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2116 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2117 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2118 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2119 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2120 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2121};
2122
2123static const u16 b43_ntab_loftlt0_r3[] = {
2124 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2125 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2126 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2127 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2128 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2129 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2130 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2131 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2132 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2133 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2134 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2135 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2136 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2137 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2138 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2139 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2140 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2141 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2142 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2143 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2144 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2145 0x0000, 0x0000,
2146};
2147
2148static const u16 b43_ntab_loftlt1_r3[] = {
2149 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2150 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2151 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2152 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2153 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2154 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2155 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2156 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2157 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2158 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2159 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2160 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2161 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2162 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2163 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2164 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2165 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2166 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2167 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2168 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2169 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2170 0x0000, 0x0000,
2171};
2172
2173/* TX gain tables */
1100const u32 b43_ntab_tx_gain_rev0_1_2[] = { 2174const u32 b43_ntab_tx_gain_rev0_1_2[] = {
1101 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, 2175 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
1102 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44, 2176 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
@@ -1635,6 +2709,79 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
1635 { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ 2709 { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
1636}; 2710};
1637 2711
2712struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
2713 { /* 2GHz */
2714 { /* PHY rev 3 */
2715 { 7, 11, 16, 23 },
2716 { -5, 6, 10, 14 },
2717 { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
2718 { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
2719 0x627E,
2720 { 0x613F, 0x613F, 0x613F, 0x613F },
2721 0x107E, 0x0066, 0x0074,
2722 0x18, 0x18, 0x18,
2723 0x020D, 0x5,
2724 },
2725 { /* PHY rev 4 */
2726 { 8, 12, 17, 25 },
2727 { -5, 6, 10, 14 },
2728 { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
2729 { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
2730 0x527E,
2731 { 0x513F, 0x513F, 0x513F, 0x513F },
2732 0x007E, 0x0066, 0x0074,
2733 0x18, 0x18, 0x18,
2734 0x01A1, 0x5,
2735 },
2736 { /* PHY rev 5+ */
2737 { 9, 13, 18, 26 },
2738 { -3, 7, 11, 16 },
2739 { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
2740 { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
2741 0x427E, /* invalid for external LNA! */
2742 { 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
2743 0x1076, 0x0066, 0x106A,
2744 0xC, 0xC, 0xC,
2745 0x01D0, 0x5,
2746 },
2747 },
2748 { /* 5GHz */
2749 { /* PHY rev 3 */
2750 { 7, 11, 17, 23 },
2751 { -6, 2, 6, 10 },
2752 { 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 },
2753 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 },
2754 0x52DE,
2755 { 0x516F, 0x516F, 0x516F, 0x516F },
2756 0x00DE, 0x00CA, 0x00CC,
2757 0x1E, 0x1E, 0x1E,
2758 0x01A1, 25,
2759 },
2760 { /* PHY rev 4 */
2761 { 8, 12, 18, 23 },
2762 { -5, 2, 6, 10 },
2763 { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
2764 { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
2765 0x629E,
2766 { 0x614F, 0x614F, 0x614F, 0x614F },
2767 0x029E, 0x1084, 0x0086,
2768 0x24, 0x24, 0x24,
2769 0x0107, 25,
2770 },
2771 { /* PHY rev 5+ */
2772 { 6, 10, 16, 21 },
2773 { -7, 0, 4, 8 },
2774 { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
2775 { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
2776 0x729E,
2777 { 0x714F, 0x714F, 0x714F, 0x714F },
2778 0x029E, 0x2084, 0x2086,
2779 0x24, 0x24, 0x24,
2780 0x00A9, 25,
2781 },
2782 },
2783};
2784
1638static inline void assert_ntab_array_sizes(void) 2785static inline void assert_ntab_array_sizes(void)
1639{ 2786{
1640#undef check 2787#undef check
@@ -1813,7 +2960,6 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
1813#define ntab_upload(dev, offset, data) do { \ 2960#define ntab_upload(dev, offset, data) do { \
1814 b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \ 2961 b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \
1815 } while (0) 2962 } while (0)
1816
1817void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) 2963void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
1818{ 2964{
1819 /* Static tables */ 2965 /* Static tables */
@@ -1847,11 +2993,70 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
1847 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1); 2993 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
1848} 2994}
1849 2995
2996#define ntab_upload_r3(dev, offset, data) do { \
2997 b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
2998 } while (0)
1850void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) 2999void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
1851{ 3000{
1852 /* Static tables */ 3001 /* Static tables */
1853 /* TODO */ 3002 ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
3003 ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
3004 ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
3005 ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
3006 ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
3007 ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
3008 ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
3009 ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
3010 ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
3011 ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
3012 ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
3013 ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
3014 ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
3015 ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
3016 ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
3017 ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3,
3018 b43_ntab_estimatepowerlt0_r3);
3019 ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3,
3020 b43_ntab_estimatepowerlt1_r3);
3021 ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
3022 ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
3023 ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
3024 ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
3025 ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
3026 ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
3027 ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
3028 ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
1854 3029
1855 /* Volatile tables */ 3030 /* Volatile tables */
1856 /* TODO */ 3031 /* TODO */
1857} 3032}
3033
3034struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
3035 struct b43_wldev *dev, bool ghz5, bool ext_lna)
3036{
3037 struct nphy_gain_ctl_workaround_entry *e;
3038 u8 phy_idx;
3039
3040 B43_WARN_ON(dev->phy.rev < 3);
3041 if (dev->phy.rev >= 5)
3042 phy_idx = 2;
3043 else if (dev->phy.rev == 4)
3044 phy_idx = 1;
3045 else
3046 phy_idx = 0;
3047
3048 e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
3049
3050 /* Only one entry differs for external LNA, so instead making whole
3051 * table 2 times bigger, hack is here
3052 */
3053 if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
3054 e->rfseq_init[0] &= 0x0FFF;
3055 e->rfseq_init[1] &= 0x0FFF;
3056 e->rfseq_init[2] &= 0x0FFF;
3057 e->rfseq_init[3] &= 0x0FFF;
3058 e->init_gain &= 0x0FFF;
3059 }
3060
3061 return e;
3062}
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4ec593ba3eef..18569367ce43 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -35,6 +35,31 @@ struct nphy_rf_control_override_rev3 {
35 u8 val_addr1; 35 u8 val_addr1;
36}; 36};
37 37
38struct nphy_gain_ctl_workaround_entry {
39 s8 lna1_gain[4];
40 s8 lna2_gain[4];
41 u8 gain_db[10];
42 u8 gain_bits[10];
43
44 u16 init_gain;
45 u16 rfseq_init[4];
46
47 u16 cliphi_gain;
48 u16 clipmd_gain;
49 u16 cliplo_gain;
50
51 u16 crsmin;
52 u16 crsminl;
53 u16 crsminu;
54
55 u16 nbclip;
56 u16 wlclip;
57};
58
59/* Get entry with workaround values for gain ctl. Does not return NULL. */
60struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
61 struct b43_wldev *dev, bool ghz5, bool ext_lna);
62
38/* Get the NPHY Channel Switch Table entry for a channel. 63/* Get the NPHY Channel Switch Table entry for a channel.
39 * Returns NULL on failure to find an entry. */ 64 * Returns NULL on failure to find an entry. */
40const struct b43_nphy_channeltab_entry_rev2 * 65const struct b43_nphy_channeltab_entry_rev2 *
@@ -109,6 +134,33 @@ b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
109#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ 134#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
110#define B43_NTAB_C1_LOFEEDTH_SIZE 128 135#define B43_NTAB_C1_LOFEEDTH_SIZE 128
111 136
137/* Static N-PHY tables, PHY revision >= 3 */
138#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */
139#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */
140#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */
141#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */
142#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */
143#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */
144#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
145#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */
146#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
147#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
148#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
149#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
150#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */
151#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */
152#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */
153#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */
154#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */
155#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */
156#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */
157#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
158#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
159#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
160#define B43_NTAB_C1_IQLT_R3 B43_NTAB32(27, 320) /* I/Q lookup 1 */
161#define B43_NTAB_C0_LOFEEDTH_R3 B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0 */
162#define B43_NTAB_C1_LOFEEDTH_R3 B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */
163
112#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18 164#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
113#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18 165#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
114#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18 166#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index e6b0528f3b52..e5be381c17bc 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -32,6 +32,36 @@
32#include "dma.h" 32#include "dma.h"
33#include "pio.h" 33#include "pio.h"
34 34
35static const struct b43_tx_legacy_rate_phy_ctl_entry b43_tx_legacy_rate_phy_ctl[] = {
36 { B43_CCK_RATE_1MB, 0x0, 0x0 },
37 { B43_CCK_RATE_2MB, 0x0, 0x1 },
38 { B43_CCK_RATE_5MB, 0x0, 0x2 },
39 { B43_CCK_RATE_11MB, 0x0, 0x3 },
40 { B43_OFDM_RATE_6MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_BPSK },
41 { B43_OFDM_RATE_9MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_BPSK },
42 { B43_OFDM_RATE_12MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QPSK },
43 { B43_OFDM_RATE_18MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QPSK },
44 { B43_OFDM_RATE_24MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QAM16 },
45 { B43_OFDM_RATE_36MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM16 },
46 { B43_OFDM_RATE_48MB, B43_TXH_PHY1_CRATE_2_3, B43_TXH_PHY1_MODUL_QAM64 },
47 { B43_OFDM_RATE_54MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM64 },
48};
49
50static const struct b43_tx_legacy_rate_phy_ctl_entry *
51b43_tx_legacy_rate_phy_ctl_ent(u8 bitrate)
52{
53 const struct b43_tx_legacy_rate_phy_ctl_entry *e;
54 unsigned int i;
55
56 for (i = 0; i < ARRAY_SIZE(b43_tx_legacy_rate_phy_ctl); i++) {
57 e = &(b43_tx_legacy_rate_phy_ctl[i]);
58 if (e->bitrate == bitrate)
59 return e;
60 }
61
62 B43_WARN_ON(1);
63 return NULL;
64}
35 65
36/* Extract the bitrate index out of a CCK PLCP header. */ 66/* Extract the bitrate index out of a CCK PLCP header. */
37static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp) 67static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
@@ -145,6 +175,34 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
145 } 175 }
146} 176}
147 177
178static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
179{
180 const struct b43_phy *phy = &dev->phy;
181 const struct b43_tx_legacy_rate_phy_ctl_entry *e;
182 u16 control = 0;
183 u16 bw;
184
185 if (phy->type == B43_PHYTYPE_LP)
186 bw = B43_TXH_PHY1_BW_20;
187 else /* FIXME */
188 bw = B43_TXH_PHY1_BW_20;
189
190 if (0) { /* FIXME: MIMO */
191 } else if (b43_is_cck_rate(bitrate) && phy->type != B43_PHYTYPE_LP) {
192 control = bw;
193 } else {
194 control = bw;
195 e = b43_tx_legacy_rate_phy_ctl_ent(bitrate);
196 if (e) {
197 control |= e->coding_rate;
198 control |= e->modulation;
199 }
200 control |= B43_TXH_PHY1_MODE_SISO;
201 }
202
203 return control;
204}
205
148static u8 b43_calc_fallback_rate(u8 bitrate) 206static u8 b43_calc_fallback_rate(u8 bitrate)
149{ 207{
150 switch (bitrate) { 208 switch (bitrate) {
@@ -437,6 +495,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
437 extra_ft |= B43_TXH_EFT_RTSFB_OFDM; 495 extra_ft |= B43_TXH_EFT_RTSFB_OFDM;
438 else 496 else
439 extra_ft |= B43_TXH_EFT_RTSFB_CCK; 497 extra_ft |= B43_TXH_EFT_RTSFB_CCK;
498
499 if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
500 phy->type == B43_PHYTYPE_N) {
501 txhdr->phy_ctl1_rts = cpu_to_le16(
502 b43_generate_tx_phy_ctl1(dev, rts_rate));
503 txhdr->phy_ctl1_rts_fb = cpu_to_le16(
504 b43_generate_tx_phy_ctl1(dev, rts_rate_fb));
505 }
440 } 506 }
441 507
442 /* Magic cookie */ 508 /* Magic cookie */
@@ -445,6 +511,13 @@ int b43_generate_txhdr(struct b43_wldev *dev,
445 else 511 else
446 txhdr->new_format.cookie = cpu_to_le16(cookie); 512 txhdr->new_format.cookie = cpu_to_le16(cookie);
447 513
514 if (phy->type == B43_PHYTYPE_N) {
515 txhdr->phy_ctl1 =
516 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
517 txhdr->phy_ctl1_fb =
518 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate_fb));
519 }
520
448 /* Apply the bitfields */ 521 /* Apply the bitfields */
449 txhdr->mac_ctl = cpu_to_le32(mac_ctl); 522 txhdr->mac_ctl = cpu_to_le32(mac_ctl);
450 txhdr->phy_ctl = cpu_to_le16(phy_ctl); 523 txhdr->phy_ctl = cpu_to_le16(phy_ctl);
@@ -652,7 +725,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
652 status.mactime += mactime; 725 status.mactime += mactime;
653 if (low_mactime_now <= mactime) 726 if (low_mactime_now <= mactime)
654 status.mactime -= 0x10000; 727 status.mactime -= 0x10000;
655 status.flag |= RX_FLAG_TSFT; 728 status.flag |= RX_FLAG_MACTIME_MPDU;
656 } 729 }
657 730
658 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; 731 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index d4cf9b390af3..42debb5cd6fa 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -73,6 +73,12 @@ struct b43_txhdr {
73 } __packed; 73 } __packed;
74} __packed; 74} __packed;
75 75
76struct b43_tx_legacy_rate_phy_ctl_entry {
77 u8 bitrate;
78 u16 coding_rate;
79 u16 modulation;
80};
81
76/* MAC TX control */ 82/* MAC TX control */
77#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ 83#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */
78#define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */ 84#define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1f11e1670bf0..c7fd73e3ad76 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2442,8 +2442,8 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
2442 return err; 2442 return err;
2443} 2443}
2444 2444
2445static int b43legacy_op_tx(struct ieee80211_hw *hw, 2445static void b43legacy_op_tx(struct ieee80211_hw *hw,
2446 struct sk_buff *skb) 2446 struct sk_buff *skb)
2447{ 2447{
2448 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 2448 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
2449 struct b43legacy_wldev *dev = wl->current_dev; 2449 struct b43legacy_wldev *dev = wl->current_dev;
@@ -2466,7 +2466,6 @@ out:
2466 /* Drop the packet. */ 2466 /* Drop the packet. */
2467 dev_kfree_skb_any(skb); 2467 dev_kfree_skb_any(skb);
2468 } 2468 }
2469 return NETDEV_TX_OK;
2470} 2469}
2471 2470
2472static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue, 2471static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 7d177d97f1f7..3a95541708a6 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -572,7 +572,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
572 status.mactime += mactime; 572 status.mactime += mactime;
573 if (low_mactime_now <= mactime) 573 if (low_mactime_now <= mactime)
574 status.mactime -= 0x10000; 574 status.mactime -= 0x10000;
575 status.flag |= RX_FLAG_TSFT; 575 status.flag |= RX_FLAG_MACTIME_MPDU;
576 } 576 }
577 577
578 chanid = (chanstat & B43legacy_RX_CHAN_ID) >> 578 chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 471a52a2f8d4..4b97f918daff 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1396,7 +1396,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
1396} 1396}
1397 1397
1398/* 1398/*
1399 * Send the CARD_DISABLE_PHY_OFF comamnd to the card to disable it 1399 * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
1400 * 1400 *
1401 * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent. 1401 * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
1402 * 1402 *
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index 0441445b8bfa..91795b5a93c5 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -961,7 +961,7 @@ struct ipw_country_channel_info {
961struct ipw_country_info { 961struct ipw_country_info {
962 u8 id; 962 u8 id;
963 u8 length; 963 u8 length;
964 u8 country_str[3]; 964 u8 country_str[IEEE80211_COUNTRY_STRING_LEN];
965 struct ipw_country_channel_info groups[7]; 965 struct ipw_country_channel_info groups[7];
966} __packed; 966} __packed;
967 967
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
new file mode 100644
index 000000000000..2a45dd44cc12
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -0,0 +1,116 @@
1config IWLWIFI_LEGACY
2 tristate "Intel Wireless Wifi legacy devices"
3 depends on PCI && MAC80211
4 select FW_LOADER
5 select NEW_LEDS
6 select LEDS_CLASS
7 select LEDS_TRIGGERS
8 select MAC80211_LEDS
9
10menu "Debugging Options"
11 depends on IWLWIFI_LEGACY
12
13config IWLWIFI_LEGACY_DEBUG
14 bool "Enable full debugging output in 4965 and 3945 drivers"
15 depends on IWLWIFI_LEGACY
16 ---help---
17 This option will enable debug tracing output for the iwlwifilegacy
18 drivers.
19
20 This will result in the kernel module being ~100k larger. You can
21 control which debug output is sent to the kernel log by setting the
22 value in
23
24 /sys/class/net/wlan0/device/debug_level
25
26 This entry will only exist if this option is enabled.
27
28 To set a value, simply echo an 8-byte hex value to the same file:
29
30 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
31
32 You can find the list of debug mask values in:
33 drivers/net/wireless/iwlwifilegacy/iwl-debug.h
34
35 If this is your first time using this driver, you should say Y here
36 as the debug information can assist others in helping you resolve
37 any problems you may encounter.
38
39config IWLWIFI_LEGACY_DEBUGFS
40 bool "4965 and 3945 debugfs support"
41 depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
42 ---help---
43 Enable creation of debugfs files for the iwlwifilegacy drivers. This
44 is a low-impact option that allows getting insight into the
45 driver's state at runtime.
46
47config IWLWIFI_LEGACY_DEVICE_TRACING
48 bool "iwlwifilegacy legacy device access tracing"
49 depends on IWLWIFI_LEGACY
50 depends on EVENT_TRACING
51 help
52 Say Y here to trace all commands, including TX frames and IO
53 accesses, sent to the device. If you say yes, iwlwifilegacy will
54 register with the ftrace framework for event tracing and dump
55 all this information to the ringbuffer, you may need to
56 increase the ringbuffer size. See the ftrace documentation
57 for more information.
58
59 When tracing is not enabled, this option still has some
60 (though rather small) overhead.
61
62 If unsure, say Y so we can help you better when problems
63 occur.
64endmenu
65
66config IWL4965
67 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
68 depends on IWLWIFI_LEGACY
69 ---help---
70 This option enables support for
71
72 Select to build the driver supporting the:
73
74 Intel Wireless WiFi Link 4965AGN
75
76 This driver uses the kernel's mac80211 subsystem.
77
78 In order to use this driver, you will need a microcode (uCode)
79 image for it. You can obtain the microcode from:
80
81 <http://intellinuxwireless.org/>.
82
83 The microcode is typically installed in /lib/firmware. You can
84 look in the hotplug script /etc/hotplug/firmware.agent to
85 determine which directory FIRMWARE_DIR is set to when the script
86 runs.
87
88 If you want to compile the driver as a module ( = code which can be
89 inserted in and removed from the running kernel whenever you want),
90 say M here and read <file:Documentation/kbuild/modules.txt>. The
91 module will be called iwl4965.
92
93config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
95 depends on IWLWIFI_LEGACY
96 ---help---
97 Select to build the driver supporting the:
98
99 Intel PRO/Wireless 3945ABG/BG Network Connection
100
101 This driver uses the kernel's mac80211 subsystem.
102
103 In order to use this driver, you will need a microcode (uCode)
104 image for it. You can obtain the microcode from:
105
106 <http://intellinuxwireless.org/>.
107
108 The microcode is typically installed in /lib/firmware. You can
109 look in the hotplug script /etc/hotplug/firmware.agent to
110 determine which directory FIRMWARE_DIR is set to when the script
111 runs.
112
113 If you want to compile the driver as a module ( = code which can be
114 inserted in and removed from the running kernel whenever you want),
115 say M here and read <file:Documentation/kbuild/modules.txt>. The
116 module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
new file mode 100644
index 000000000000..d56aeb38c211
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -0,0 +1,25 @@
1obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
2iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwl-legacy-objs += iwl-scan.o iwl-led.o
5iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
6iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
7
8iwl-legacy-objs += $(iwl-legacy-m)
9
10CFLAGS_iwl-devtrace.o := -I$(src)
11
12# 4965
13obj-$(CONFIG_IWL4965) += iwl4965.o
14iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
15iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
16iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
17iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
18iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
19
20# 3945
21obj-$(CONFIG_IWL3945) += iwl3945.o
22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
23iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
24
25ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
index ef0835b01b6b..cfabb38793ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -60,12 +60,13 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 + 60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400; 61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
62 ssize_t ret; 62 ssize_t ret;
63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; 63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
64 *max_ofdm;
64 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; 65 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
65 struct iwl39_statistics_rx_non_phy *general, *accum_general; 66 struct iwl39_statistics_rx_non_phy *general, *accum_general;
66 struct iwl39_statistics_rx_non_phy *delta_general, *max_general; 67 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
67 68
68 if (!iwl_is_alive(priv)) 69 if (!iwl_legacy_is_alive(priv))
69 return -EAGAIN; 70 return -EAGAIN;
70 71
71 buf = kzalloc(bufsz, GFP_KERNEL); 72 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -335,7 +336,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
335 ssize_t ret; 336 ssize_t ret;
336 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; 337 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
337 338
338 if (!iwl_is_alive(priv)) 339 if (!iwl_legacy_is_alive(priv))
339 return -EAGAIN; 340 return -EAGAIN;
340 341
341 buf = kzalloc(bufsz, GFP_KERNEL); 342 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -434,7 +435,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file,
434 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; 435 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
435 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div; 436 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
436 437
437 if (!iwl_is_alive(priv)) 438 if (!iwl_legacy_is_alive(priv))
438 return -EAGAIN; 439 return -EAGAIN;
439 440
440 buf = kzalloc(bufsz, GFP_KERNEL); 441 buf = kzalloc(bufsz, GFP_KERNEL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
index 70809c53c215..8fef4b32b447 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30#include "iwl-core.h" 30#include "iwl-core.h"
31#include "iwl-debug.h" 31#include "iwl-debug.h"
32 32
33#ifdef CONFIG_IWLWIFI_DEBUGFS 33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, 34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos); 35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, 36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
index 2c9ed2b502a3..836c9919f82e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -185,4 +185,3 @@ struct iwl3945_tfd {
185 185
186 186
187#endif /* __iwl_3945_fh_h__ */ 187#endif /* __iwl_3945_fh_h__ */
188
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 65b5834da28c..779d3cb86e2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -164,12 +164,11 @@ struct iwl3945_eeprom {
164/* 164/*
165 * Per-channel regulatory data. 165 * Per-channel regulatory data.
166 * 166 *
167 * Each channel that *might* be supported by 3945 or 4965 has a fixed location 167 * Each channel that *might* be supported by 3945 has a fixed location
168 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory 168 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
169 * txpower (MSB). 169 * txpower (MSB).
170 * 170 *
171 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) 171 * Entries immediately below are for 20 MHz channel width.
172 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
173 * 172 *
174 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 173 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
175 */ 174 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
index abe2b739c4dc..abd923558d48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -56,36 +56,9 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
56 .callback = NULL, 56 .callback = NULL,
57 }; 57 };
58 58
59 return iwl_send_cmd(priv, &cmd); 59 return iwl_legacy_send_cmd(priv, &cmd);
60}
61
62/* Set led on command */
63static int iwl3945_led_on(struct iwl_priv *priv)
64{
65 struct iwl_led_cmd led_cmd = {
66 .id = IWL_LED_LINK,
67 .on = IWL_LED_SOLID,
68 .off = 0,
69 .interval = IWL_DEF_LED_INTRVL
70 };
71 return iwl3945_send_led_cmd(priv, &led_cmd);
72}
73
74/* Set led off command */
75static int iwl3945_led_off(struct iwl_priv *priv)
76{
77 struct iwl_led_cmd led_cmd = {
78 .id = IWL_LED_LINK,
79 .on = 0,
80 .off = 0,
81 .interval = IWL_DEF_LED_INTRVL
82 };
83 IWL_DEBUG_LED(priv, "led off\n");
84 return iwl3945_send_led_cmd(priv, &led_cmd);
85} 60}
86 61
87const struct iwl_led_ops iwl3945_led_ops = { 62const struct iwl_led_ops iwl3945_led_ops = {
88 .cmd = iwl3945_send_led_cmd, 63 .cmd = iwl3945_send_led_cmd,
89 .on = iwl3945_led_on,
90 .off = iwl3945_led_off,
91}; 64};
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
index ce990adc51e7..96716276eb0d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 1f3e7e34fbc7..977bd2477c6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -89,7 +89,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
89}; 89};
90 90
91#define IWL_RATE_MAX_WINDOW 62 91#define IWL_RATE_MAX_WINDOW 62
92#define IWL_RATE_FLUSH (3*HZ) 92#define IWL_RATE_FLUSH (3*HZ)
93#define IWL_RATE_WIN_FLUSH (HZ/2) 93#define IWL_RATE_WIN_FLUSH (HZ/2)
94#define IWL39_RATE_HIGH_TH 11520 94#define IWL39_RATE_HIGH_TH 11520
95#define IWL_SUCCESS_UP_TH 8960 95#define IWL_SUCCESS_UP_TH 8960
@@ -394,18 +394,18 @@ out:
394 IWL_DEBUG_INFO(priv, "leave\n"); 394 IWL_DEBUG_INFO(priv, "leave\n");
395} 395}
396 396
397static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 397static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
398{ 398{
399 return hw->priv; 399 return hw->priv;
400} 400}
401 401
402/* rate scale requires free function to be implemented */ 402/* rate scale requires free function to be implemented */
403static void rs_free(void *priv) 403static void iwl3945_rs_free(void *priv)
404{ 404{
405 return; 405 return;
406} 406}
407 407
408static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) 408static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
409{ 409{
410 struct iwl3945_rs_sta *rs_sta; 410 struct iwl3945_rs_sta *rs_sta;
411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
@@ -423,7 +423,7 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
423 return rs_sta; 423 return rs_sta;
424} 424}
425 425
426static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, 426static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
427 void *priv_sta) 427 void *priv_sta)
428{ 428{
429 struct iwl3945_rs_sta *rs_sta = priv_sta; 429 struct iwl3945_rs_sta *rs_sta = priv_sta;
@@ -438,12 +438,12 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
438 438
439 439
440/** 440/**
441 * rs_tx_status - Update rate control values based on Tx results 441 * iwl3945_rs_tx_status - Update rate control values based on Tx results
442 * 442 *
443 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by 443 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
444 * the hardware for each rate. 444 * the hardware for each rate.
445 */ 445 */
446static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, 446static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
447 struct ieee80211_sta *sta, void *priv_sta, 447 struct ieee80211_sta *sta, void *priv_sta,
448 struct sk_buff *skb) 448 struct sk_buff *skb)
449{ 449{
@@ -612,7 +612,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
612} 612}
613 613
614/** 614/**
615 * rs_get_rate - find the rate for the requested packet 615 * iwl3945_rs_get_rate - find the rate for the requested packet
616 * 616 *
617 * Returns the ieee80211_rate structure allocated by the driver. 617 * Returns the ieee80211_rate structure allocated by the driver.
618 * 618 *
@@ -627,7 +627,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
627 * rate table and must reference the driver allocated rate table 627 * rate table and must reference the driver allocated rate table
628 * 628 *
629 */ 629 */
630static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, 630static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
631 void *priv_sta, struct ieee80211_tx_rate_control *txrc) 631 void *priv_sta, struct ieee80211_tx_rate_control *txrc)
632{ 632{
633 struct ieee80211_supported_band *sband = txrc->sband; 633 struct ieee80211_supported_band *sband = txrc->sband;
@@ -644,7 +644,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
644 u32 fail_count; 644 u32 fail_count;
645 s8 scale_action = 0; 645 s8 scale_action = 0;
646 unsigned long flags; 646 unsigned long flags;
647 u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0; 647 u16 rate_mask;
648 s8 max_rate_idx = -1; 648 s8 max_rate_idx = -1;
649 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; 649 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
650 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 650 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -899,7 +899,8 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
899 * the station is added. Since mac80211 calls this function before a 899 * the station is added. Since mac80211 calls this function before a
900 * station is added we ignore it. 900 * station is added we ignore it.
901 */ 901 */
902static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, 902static void iwl3945_rs_rate_init_stub(void *priv_r,
903 struct ieee80211_supported_band *sband,
903 struct ieee80211_sta *sta, void *priv_sta) 904 struct ieee80211_sta *sta, void *priv_sta)
904{ 905{
905} 906}
@@ -907,13 +908,13 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
907static struct rate_control_ops rs_ops = { 908static struct rate_control_ops rs_ops = {
908 .module = NULL, 909 .module = NULL,
909 .name = RS_NAME, 910 .name = RS_NAME,
910 .tx_status = rs_tx_status, 911 .tx_status = iwl3945_rs_tx_status,
911 .get_rate = rs_get_rate, 912 .get_rate = iwl3945_rs_get_rate,
912 .rate_init = rs_rate_init_stub, 913 .rate_init = iwl3945_rs_rate_init_stub,
913 .alloc = rs_alloc, 914 .alloc = iwl3945_rs_alloc,
914 .free = rs_free, 915 .free = iwl3945_rs_free,
915 .alloc_sta = rs_alloc_sta, 916 .alloc_sta = iwl3945_rs_alloc_sta,
916 .free_sta = rs_free_sta, 917 .free_sta = iwl3945_rs_free_sta,
917#ifdef CONFIG_MAC80211_DEBUGFS 918#ifdef CONFIG_MAC80211_DEBUGFS
918 .add_sta_debugfs = iwl3945_add_debugfs, 919 .add_sta_debugfs = iwl3945_add_debugfs,
919 .remove_sta_debugfs = iwl3945_remove_debugfs, 920 .remove_sta_debugfs = iwl3945_remove_debugfs,
@@ -991,5 +992,3 @@ void iwl3945_rate_control_unregister(void)
991{ 992{
992 ieee80211_rate_control_unregister(&rs_ops); 993 ieee80211_rate_control_unregister(&rs_ops);
993} 994}
994
995
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index 39b6f16c87fa..d096dc28204d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -51,7 +51,6 @@
51#include "iwl-led.h" 51#include "iwl-led.h"
52#include "iwl-3945-led.h" 52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h" 53#include "iwl-3945-debugfs.h"
54#include "iwl-legacy.h"
55 54
56#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
57 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -172,14 +171,14 @@ void iwl3945_disable_events(struct iwl_priv *priv)
172 return; 171 return;
173 } 172 }
174 173
175 disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32))); 174 disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
176 array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32))); 175 array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
177 176
178 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { 177 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
179 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", 178 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
180 disable_ptr); 179 disable_ptr);
181 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) 180 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
182 iwl_write_targ_mem(priv, 181 iwl_legacy_write_targ_mem(priv,
183 disable_ptr + (i * sizeof(u32)), 182 disable_ptr + (i * sizeof(u32)),
184 evt_disable[i]); 183 evt_disable[i]);
185 184
@@ -202,7 +201,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
202 return -1; 201 return -1;
203} 202}
204 203
205#ifdef CONFIG_IWLWIFI_DEBUG 204#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
206#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x 205#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
207 206
208static const char *iwl3945_get_tx_fail_reason(u32 status) 207static const char *iwl3945_get_tx_fail_reason(u32 status)
@@ -255,7 +254,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
255 break; 254 break;
256 case IEEE80211_BAND_2GHZ: 255 case IEEE80211_BAND_2GHZ:
257 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 256 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
258 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 257 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
259 if (rate == IWL_RATE_11M_INDEX) 258 if (rate == IWL_RATE_11M_INDEX)
260 next_rate = IWL_RATE_5M_INDEX; 259 next_rate = IWL_RATE_5M_INDEX;
261 } 260 }
@@ -285,8 +284,9 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
285 284
286 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM); 285 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
287 286
288 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; 287 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
289 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 288 q->read_ptr != index;
289 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
290 290
291 tx_info = &txq->txb[txq->q.read_ptr]; 291 tx_info = &txq->txb[txq->q.read_ptr];
292 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); 292 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
@@ -294,10 +294,10 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
294 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 294 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
295 } 295 }
296 296
297 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 297 if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
298 (txq_id != IWL39_CMD_QUEUE_NUM) && 298 (txq_id != IWL39_CMD_QUEUE_NUM) &&
299 priv->mac80211_registered) 299 priv->mac80211_registered)
300 iwl_wake_queue(priv, txq); 300 iwl_legacy_wake_queue(priv, txq);
301} 301}
302 302
303/** 303/**
@@ -317,7 +317,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
317 int rate_idx; 317 int rate_idx;
318 int fail; 318 int fail;
319 319
320 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 320 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
321 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 321 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
322 "is out of range [0-%d] %d %d\n", txq_id, 322 "is out of range [0-%d] %d %d\n", txq_id,
323 index, txq->q.n_bd, txq->q.write_ptr, 323 index, txq->q.n_bd, txq->q.write_ptr,
@@ -363,12 +363,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
363 * RX handler implementations 363 * RX handler implementations
364 * 364 *
365 *****************************************************************************/ 365 *****************************************************************************/
366#ifdef CONFIG_IWLWIFI_DEBUGFS 366#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
367/*
368 * based on the assumption of all statistics counter are in DWORD
369 * FIXME: This function is for debugging, do not deal with
370 * the case of counters roll-over.
371 */
372static void iwl3945_accumulative_statistics(struct iwl_priv *priv, 367static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
373 __le32 *stats) 368 __le32 *stats)
374{ 369{
@@ -410,10 +405,10 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
410 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 405 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
411 (int)sizeof(struct iwl3945_notif_statistics), 406 (int)sizeof(struct iwl3945_notif_statistics),
412 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 407 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
413#ifdef CONFIG_IWLWIFI_DEBUGFS 408#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
414 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); 409 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
415#endif 410#endif
416 iwl_recover_from_statistics(priv, pkt); 411 iwl_legacy_recover_from_statistics(priv, pkt);
417 412
418 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); 413 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
419} 414}
@@ -425,7 +420,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
425 __le32 *flag = (__le32 *)&pkt->u.raw; 420 __le32 *flag = (__le32 *)&pkt->u.raw;
426 421
427 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { 422 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
428#ifdef CONFIG_IWLWIFI_DEBUGFS 423#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
429 memset(&priv->_3945.accum_statistics, 0, 424 memset(&priv->_3945.accum_statistics, 0,
430 sizeof(struct iwl3945_notif_statistics)); 425 sizeof(struct iwl3945_notif_statistics));
431 memset(&priv->_3945.delta_statistics, 0, 426 memset(&priv->_3945.delta_statistics, 0,
@@ -496,14 +491,14 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
496 } 491 }
497 492
498 if (!iwl3945_mod_params.sw_crypto) 493 if (!iwl3945_mod_params.sw_crypto)
499 iwl_set_decrypted_flag(priv, 494 iwl_legacy_set_decrypted_flag(priv,
500 (struct ieee80211_hdr *)rxb_addr(rxb), 495 (struct ieee80211_hdr *)rxb_addr(rxb),
501 le32_to_cpu(rx_end->status), stats); 496 le32_to_cpu(rx_end->status), stats);
502 497
503 skb_add_rx_frag(skb, 0, rxb->page, 498 skb_add_rx_frag(skb, 0, rxb->page,
504 (void *)rx_hdr->payload - (void *)pkt, len); 499 (void *)rx_hdr->payload - (void *)pkt, len);
505 500
506 iwl_update_stats(priv, false, fc, len); 501 iwl_legacy_update_stats(priv, false, fc, len);
507 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 502 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
508 503
509 ieee80211_rx(priv->hw, skb); 504 ieee80211_rx(priv->hw, skb);
@@ -528,10 +523,11 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
528 523
529 rx_status.flag = 0; 524 rx_status.flag = 0;
530 rx_status.mactime = le64_to_cpu(rx_end->timestamp); 525 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
531 rx_status.freq =
532 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
533 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 526 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
534 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 527 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
528 rx_status.freq =
529 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
530 rx_status.band);
535 531
536 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); 532 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
537 if (rx_status.band == IEEE80211_BAND_5GHZ) 533 if (rx_status.band == IEEE80211_BAND_5GHZ)
@@ -575,7 +571,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
575 rx_status.signal, rx_status.signal, 571 rx_status.signal, rx_status.signal,
576 rx_status.rate_idx); 572 rx_status.rate_idx);
577 573
578 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); 574 iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
575 header);
579 576
580 if (network_packet) { 577 if (network_packet) {
581 priv->_3945.last_beacon_time = 578 priv->_3945.last_beacon_time =
@@ -695,8 +692,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
695 692
696 /* We need to figure out how to get the sta->supp_rates while 693 /* We need to figure out how to get the sta->supp_rates while
697 * in this running context */ 694 * in this running context */
698 rate_mask = IWL_RATES_MASK; 695 rate_mask = IWL_RATES_MASK_3945;
699
700 696
701 /* Set retry limit on DATA packets and Probe Responses*/ 697 /* Set retry limit on DATA packets and Probe Responses*/
702 if (ieee80211_is_probe_resp(fc)) 698 if (ieee80211_is_probe_resp(fc))
@@ -744,7 +740,7 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
744 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; 740 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
745 station->sta.rate_n_flags = cpu_to_le16(tx_rate); 741 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
746 station->sta.mode = STA_CONTROL_MODIFY_MSK; 742 station->sta.mode = STA_CONTROL_MODIFY_MSK;
747 iwl_send_add_sta(priv, &station->sta, CMD_ASYNC); 743 iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
748 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 744 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
749 745
750 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", 746 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
@@ -759,7 +755,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
759 * to set power to V_AUX, do 755 * to set power to V_AUX, do
760 756
761 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { 757 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
762 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 758 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
763 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 759 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
764 ~APMG_PS_CTRL_MSK_PWR_SRC); 760 ~APMG_PS_CTRL_MSK_PWR_SRC);
765 761
@@ -769,7 +765,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
769 } 765 }
770 */ 766 */
771 767
772 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 768 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
773 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 769 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
774 ~APMG_PS_CTRL_MSK_PWR_SRC); 770 ~APMG_PS_CTRL_MSK_PWR_SRC);
775 771
@@ -779,10 +775,11 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
779 775
780static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 776static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
781{ 777{
782 iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); 778 iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
783 iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); 779 iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
784 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0); 780 rxq->rb_stts_dma);
785 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 781 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
782 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
786 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | 783 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
787 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | 784 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
788 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | 785 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
@@ -793,7 +790,7 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
793 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); 790 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
794 791
795 /* fake read to flush all prev I/O */ 792 /* fake read to flush all prev I/O */
796 iwl_read_direct32(priv, FH39_RSSR_CTRL); 793 iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
797 794
798 return 0; 795 return 0;
799} 796}
@@ -802,23 +799,23 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
802{ 799{
803 800
804 /* bypass mode */ 801 /* bypass mode */
805 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2); 802 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
806 803
807 /* RA 0 is active */ 804 /* RA 0 is active */
808 iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); 805 iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
809 806
810 /* all 6 fifo are active */ 807 /* all 6 fifo are active */
811 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); 808 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
812 809
813 iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); 810 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
814 iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); 811 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
815 iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); 812 iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
816 iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); 813 iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
817 814
818 iwl_write_direct32(priv, FH39_TSSR_CBB_BASE, 815 iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
819 priv->_3945.shared_phys); 816 priv->_3945.shared_phys);
820 817
821 iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG, 818 iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
822 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | 819 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
823 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | 820 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
824 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | 821 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
@@ -844,7 +841,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
844 iwl3945_hw_txq_ctx_free(priv); 841 iwl3945_hw_txq_ctx_free(priv);
845 842
846 /* allocate tx queue structure */ 843 /* allocate tx queue structure */
847 rc = iwl_alloc_txq_mem(priv); 844 rc = iwl_legacy_alloc_txq_mem(priv);
848 if (rc) 845 if (rc)
849 return rc; 846 return rc;
850 847
@@ -857,8 +854,8 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
857 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 854 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
858 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ? 855 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
859 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 856 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
860 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 857 rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
861 txq_id); 858 slots_num, txq_id);
862 if (rc) { 859 if (rc) {
863 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); 860 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
864 goto error; 861 goto error;
@@ -875,21 +872,23 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
875 872
876/* 873/*
877 * Start up 3945's basic functionality after it has been reset 874 * Start up 3945's basic functionality after it has been reset
878 * (e.g. after platform boot, or shutdown via iwl_apm_stop()) 875 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
879 * NOTE: This does not load uCode nor start the embedded processor 876 * NOTE: This does not load uCode nor start the embedded processor
880 */ 877 */
881static int iwl3945_apm_init(struct iwl_priv *priv) 878static int iwl3945_apm_init(struct iwl_priv *priv)
882{ 879{
883 int ret = iwl_apm_init(priv); 880 int ret = iwl_legacy_apm_init(priv);
884 881
885 /* Clear APMG (NIC's internal power management) interrupts */ 882 /* Clear APMG (NIC's internal power management) interrupts */
886 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); 883 iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
887 iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); 884 iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
888 885
889 /* Reset radio chip */ 886 /* Reset radio chip */
890 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); 887 iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
888 APMG_PS_CTRL_VAL_RESET_REQ);
891 udelay(5); 889 udelay(5);
892 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); 890 iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
891 APMG_PS_CTRL_VAL_RESET_REQ);
893 892
894 return ret; 893 return ret;
895} 894}
@@ -898,30 +897,28 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
898{ 897{
899 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; 898 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
900 unsigned long flags; 899 unsigned long flags;
901 u8 rev_id = 0; 900 u8 rev_id = priv->pci_dev->revision;
902 901
903 spin_lock_irqsave(&priv->lock, flags); 902 spin_lock_irqsave(&priv->lock, flags);
904 903
905 /* Determine HW type */ 904 /* Determine HW type */
906 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
907
908 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); 905 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
909 906
910 if (rev_id & PCI_CFG_REV_ID_BIT_RTP) 907 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
911 IWL_DEBUG_INFO(priv, "RTP type\n"); 908 IWL_DEBUG_INFO(priv, "RTP type\n");
912 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { 909 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
913 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); 910 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
914 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 911 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
915 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); 912 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
916 } else { 913 } else {
917 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); 914 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
918 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 915 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
919 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); 916 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
920 } 917 }
921 918
922 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { 919 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
923 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); 920 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
924 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 921 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
925 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); 922 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
926 } else 923 } else
927 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); 924 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
@@ -929,24 +926,24 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
929 if ((eeprom->board_revision & 0xF0) == 0xD0) { 926 if ((eeprom->board_revision & 0xF0) == 0xD0) {
930 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", 927 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
931 eeprom->board_revision); 928 eeprom->board_revision);
932 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 929 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
933 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 930 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
934 } else { 931 } else {
935 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", 932 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
936 eeprom->board_revision); 933 eeprom->board_revision);
937 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, 934 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
938 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 935 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
939 } 936 }
940 937
941 if (eeprom->almgor_m_version <= 1) { 938 if (eeprom->almgor_m_version <= 1) {
942 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 939 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
943 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); 940 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
944 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", 941 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
945 eeprom->almgor_m_version); 942 eeprom->almgor_m_version);
946 } else { 943 } else {
947 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", 944 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
948 eeprom->almgor_m_version); 945 eeprom->almgor_m_version);
949 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 946 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
950 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); 947 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
951 } 948 }
952 spin_unlock_irqrestore(&priv->lock, flags); 949 spin_unlock_irqrestore(&priv->lock, flags);
@@ -974,7 +971,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
974 971
975 /* Allocate the RX queue, or reset if it is already allocated */ 972 /* Allocate the RX queue, or reset if it is already allocated */
976 if (!rxq->bd) { 973 if (!rxq->bd) {
977 rc = iwl_rx_queue_alloc(priv); 974 rc = iwl_legacy_rx_queue_alloc(priv);
978 if (rc) { 975 if (rc) {
979 IWL_ERR(priv, "Unable to initialize Rx queue\n"); 976 IWL_ERR(priv, "Unable to initialize Rx queue\n");
980 return -ENOMEM; 977 return -ENOMEM;
@@ -989,10 +986,10 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
989 986
990 /* Look at using this instead: 987 /* Look at using this instead:
991 rxq->need_update = 1; 988 rxq->need_update = 1;
992 iwl_rx_queue_update_write_ptr(priv, rxq); 989 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
993 */ 990 */
994 991
995 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); 992 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
996 993
997 rc = iwl3945_txq_ctx_reset(priv); 994 rc = iwl3945_txq_ctx_reset(priv);
998 if (rc) 995 if (rc)
@@ -1017,12 +1014,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1017 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 1014 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1018 txq_id++) 1015 txq_id++)
1019 if (txq_id == IWL39_CMD_QUEUE_NUM) 1016 if (txq_id == IWL39_CMD_QUEUE_NUM)
1020 iwl_cmd_queue_free(priv); 1017 iwl_legacy_cmd_queue_free(priv);
1021 else 1018 else
1022 iwl_tx_queue_free(priv, txq_id); 1019 iwl_legacy_tx_queue_free(priv, txq_id);
1023 1020
1024 /* free tx queue structure */ 1021 /* free tx queue structure */
1025 iwl_free_txq_mem(priv); 1022 iwl_legacy_txq_mem(priv);
1026} 1023}
1027 1024
1028void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) 1025void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1030,12 +1027,12 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1030 int txq_id; 1027 int txq_id;
1031 1028
1032 /* stop SCD */ 1029 /* stop SCD */
1033 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); 1030 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
1034 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0); 1031 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1035 1032
1036 /* reset TFD queues */ 1033 /* reset TFD queues */
1037 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 1034 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
1038 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); 1035 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1039 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, 1036 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1040 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), 1037 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1041 1000); 1038 1000);
@@ -1102,12 +1099,12 @@ static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1102#define IWL_TEMPERATURE_LIMIT_TIMER 6 1099#define IWL_TEMPERATURE_LIMIT_TIMER 6
1103 1100
1104/** 1101/**
1105 * is_temp_calib_needed - determines if new calibration is needed 1102 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
1106 * 1103 *
1107 * records new temperature in tx_mgr->temperature. 1104 * records new temperature in tx_mgr->temperature.
1108 * replaces tx_mgr->last_temperature *only* if calib needed 1105 * replaces tx_mgr->last_temperature *only* if calib needed
1109 * (assumes caller will actually do the calibration!). */ 1106 * (assumes caller will actually do the calibration!). */
1110static int is_temp_calib_needed(struct iwl_priv *priv) 1107static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
1111{ 1108{
1112 int temp_diff; 1109 int temp_diff;
1113 1110
@@ -1338,9 +1335,6 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_in
1338 * based on eeprom channel data) for this channel. */ 1335 * based on eeprom channel data) for this channel. */
1339 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]); 1336 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
1340 1337
1341 /* further limit to user's max power preference.
1342 * FIXME: Other spectrum management power limitations do not
1343 * seem to apply?? */
1344 power = min(power, priv->tx_power_user_lmt); 1338 power = min(power, priv->tx_power_user_lmt);
1345 scan_power_info->requested_power = power; 1339 scan_power_info->requested_power = power;
1346 1340
@@ -1394,7 +1388,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1394 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); 1388 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1395 1389
1396 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1390 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1397 ch_info = iwl_get_channel_info(priv, priv->band, chan); 1391 ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
1398 if (!ch_info) { 1392 if (!ch_info) {
1399 IWL_ERR(priv, 1393 IWL_ERR(priv,
1400 "Failed to get channel info for channel %d [%d]\n", 1394 "Failed to get channel info for channel %d [%d]\n",
@@ -1402,7 +1396,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1402 return -EINVAL; 1396 return -EINVAL;
1403 } 1397 }
1404 1398
1405 if (!is_channel_valid(ch_info)) { 1399 if (!iwl_legacy_is_channel_valid(ch_info)) {
1406 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on " 1400 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
1407 "non-Tx channel.\n"); 1401 "non-Tx channel.\n");
1408 return 0; 1402 return 0;
@@ -1437,7 +1431,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1437 txpower.power[i].rate); 1431 txpower.power[i].rate);
1438 } 1432 }
1439 1433
1440 return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, 1434 return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
1441 sizeof(struct iwl3945_txpowertable_cmd), 1435 sizeof(struct iwl3945_txpowertable_cmd),
1442 &txpower); 1436 &txpower);
1443 1437
@@ -1571,7 +1565,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1571 /* set up new Tx power info for each and every channel, 2.4 and 5.x */ 1565 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1572 for (i = 0; i < priv->channel_count; i++) { 1566 for (i = 0; i < priv->channel_count; i++) {
1573 ch_info = &priv->channel_info[i]; 1567 ch_info = &priv->channel_info[i];
1574 a_band = is_channel_a_band(ch_info); 1568 a_band = iwl_legacy_is_channel_a_band(ch_info);
1575 1569
1576 /* Get this chnlgrp's factory calibration temperature */ 1570 /* Get this chnlgrp's factory calibration temperature */
1577 ref_temp = (s16)eeprom->groups[ch_info->group_index]. 1571 ref_temp = (s16)eeprom->groups[ch_info->group_index].
@@ -1583,7 +1577,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1583 ref_temp); 1577 ref_temp);
1584 1578
1585 /* set tx power value for all rates, OFDM and CCK */ 1579 /* set tx power value for all rates, OFDM and CCK */
1586 for (rate_index = 0; rate_index < IWL_RATE_COUNT; 1580 for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
1587 rate_index++) { 1581 rate_index++) {
1588 int power_idx = 1582 int power_idx =
1589 ch_info->power_info[rate_index].base_power_index; 1583 ch_info->power_info[rate_index].base_power_index;
@@ -1637,7 +1631,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1637 1631
1638 for (i = 0; i < priv->channel_count; i++) { 1632 for (i = 0; i < priv->channel_count; i++) {
1639 ch_info = &priv->channel_info[i]; 1633 ch_info = &priv->channel_info[i];
1640 a_band = is_channel_a_band(ch_info); 1634 a_band = iwl_legacy_is_channel_a_band(ch_info);
1641 1635
1642 /* find minimum power of all user and regulatory constraints 1636 /* find minimum power of all user and regulatory constraints
1643 * (does not consider h/w clipping limitations) */ 1637 * (does not consider h/w clipping limitations) */
@@ -1653,7 +1647,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1653 1647
1654 /* update txpower settings for all channels, 1648 /* update txpower settings for all channels,
1655 * send to NIC if associated. */ 1649 * send to NIC if associated. */
1656 is_temp_calib_needed(priv); 1650 iwl3945_is_temp_calib_needed(priv);
1657 iwl3945_hw_reg_comp_txpower_temp(priv); 1651 iwl3945_hw_reg_comp_txpower_temp(priv);
1658 1652
1659 return 0; 1653 return 0;
@@ -1671,8 +1665,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1671 .flags = CMD_WANT_SKB, 1665 .flags = CMD_WANT_SKB,
1672 .data = &rxon_assoc, 1666 .data = &rxon_assoc,
1673 }; 1667 };
1674 const struct iwl_rxon_cmd *rxon1 = &ctx->staging; 1668 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1675 const struct iwl_rxon_cmd *rxon2 = &ctx->active; 1669 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1676 1670
1677 if ((rxon1->flags == rxon2->flags) && 1671 if ((rxon1->flags == rxon2->flags) &&
1678 (rxon1->filter_flags == rxon2->filter_flags) && 1672 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1688,7 +1682,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1688 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; 1682 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1689 rxon_assoc.reserved = 0; 1683 rxon_assoc.reserved = 0;
1690 1684
1691 rc = iwl_send_cmd_sync(priv, &cmd); 1685 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
1692 if (rc) 1686 if (rc)
1693 return rc; 1687 return rc;
1694 1688
@@ -1698,7 +1692,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1698 rc = -EIO; 1692 rc = -EIO;
1699 } 1693 }
1700 1694
1701 iwl_free_pages(priv, cmd.reply_page); 1695 iwl_legacy_free_pages(priv, cmd.reply_page);
1702 1696
1703 return rc; 1697 return rc;
1704} 1698}
@@ -1722,7 +1716,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1722 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1716 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1723 return -EINVAL; 1717 return -EINVAL;
1724 1718
1725 if (!iwl_is_alive(priv)) 1719 if (!iwl_legacy_is_alive(priv))
1726 return -1; 1720 return -1;
1727 1721
1728 /* always get timestamp with Rx frame */ 1722 /* always get timestamp with Rx frame */
@@ -1733,7 +1727,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1733 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); 1727 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1734 staging_rxon->flags |= iwl3945_get_antenna_flags(priv); 1728 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1735 1729
1736 rc = iwl_check_rxon_cmd(priv, ctx); 1730 rc = iwl_legacy_check_rxon_cmd(priv, ctx);
1737 if (rc) { 1731 if (rc) {
1738 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 1732 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1739 return -EINVAL; 1733 return -EINVAL;
@@ -1742,8 +1736,9 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1742 /* If we don't need to send a full RXON, we can use 1736 /* If we don't need to send a full RXON, we can use
1743 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter 1737 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1744 * and other flags for the current radio configuration. */ 1738 * and other flags for the current radio configuration. */
1745 if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) { 1739 if (!iwl_legacy_full_rxon_required(priv,
1746 rc = iwl_send_rxon_assoc(priv, 1740 &priv->contexts[IWL_RXON_CTX_BSS])) {
1741 rc = iwl_legacy_send_rxon_assoc(priv,
1747 &priv->contexts[IWL_RXON_CTX_BSS]); 1742 &priv->contexts[IWL_RXON_CTX_BSS]);
1748 if (rc) { 1743 if (rc) {
1749 IWL_ERR(priv, "Error setting RXON_ASSOC " 1744 IWL_ERR(priv, "Error setting RXON_ASSOC "
@@ -1760,7 +1755,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1760 * an RXON_ASSOC and the new config wants the associated mask enabled, 1755 * an RXON_ASSOC and the new config wants the associated mask enabled,
1761 * we must clear the associated from the active configuration 1756 * we must clear the associated from the active configuration
1762 * before we apply the new config */ 1757 * before we apply the new config */
1763 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) { 1758 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
1764 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 1759 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1765 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1760 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1766 1761
@@ -1770,7 +1765,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1770 */ 1765 */
1771 active_rxon->reserved4 = 0; 1766 active_rxon->reserved4 = 0;
1772 active_rxon->reserved5 = 0; 1767 active_rxon->reserved5 = 0;
1773 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1768 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1774 sizeof(struct iwl3945_rxon_cmd), 1769 sizeof(struct iwl3945_rxon_cmd),
1775 &priv->contexts[IWL_RXON_CTX_BSS].active); 1770 &priv->contexts[IWL_RXON_CTX_BSS].active);
1776 1771
@@ -1782,9 +1777,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1782 "configuration (%d).\n", rc); 1777 "configuration (%d).\n", rc);
1783 return rc; 1778 return rc;
1784 } 1779 }
1785 iwl_clear_ucode_stations(priv, 1780 iwl_legacy_clear_ucode_stations(priv,
1781 &priv->contexts[IWL_RXON_CTX_BSS]);
1782 iwl_legacy_restore_stations(priv,
1786 &priv->contexts[IWL_RXON_CTX_BSS]); 1783 &priv->contexts[IWL_RXON_CTX_BSS]);
1787 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1788 } 1784 }
1789 1785
1790 IWL_DEBUG_INFO(priv, "Sending RXON\n" 1786 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1802,10 +1798,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1802 staging_rxon->reserved4 = 0; 1798 staging_rxon->reserved4 = 0;
1803 staging_rxon->reserved5 = 0; 1799 staging_rxon->reserved5 = 0;
1804 1800
1805 iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto); 1801 iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
1806 1802
1807 /* Apply the new configuration */ 1803 /* Apply the new configuration */
1808 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1804 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1809 sizeof(struct iwl3945_rxon_cmd), 1805 sizeof(struct iwl3945_rxon_cmd),
1810 staging_rxon); 1806 staging_rxon);
1811 if (rc) { 1807 if (rc) {
@@ -1816,14 +1812,15 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1816 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); 1812 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1817 1813
1818 if (!new_assoc) { 1814 if (!new_assoc) {
1819 iwl_clear_ucode_stations(priv, 1815 iwl_legacy_clear_ucode_stations(priv,
1820 &priv->contexts[IWL_RXON_CTX_BSS]); 1816 &priv->contexts[IWL_RXON_CTX_BSS]);
1821 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]); 1817 iwl_legacy_restore_stations(priv,
1818 &priv->contexts[IWL_RXON_CTX_BSS]);
1822 } 1819 }
1823 1820
1824 /* If we issue a new RXON command which required a tune then we must 1821 /* If we issue a new RXON command which required a tune then we must
1825 * send a new TXPOWER command or we won't be able to Tx any frames */ 1822 * send a new TXPOWER command or we won't be able to Tx any frames */
1826 rc = priv->cfg->ops->lib->send_tx_power(priv); 1823 rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1827 if (rc) { 1824 if (rc) {
1828 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); 1825 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1829 return rc; 1826 return rc;
@@ -1853,7 +1850,7 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1853{ 1850{
1854 /* This will kick in the "brute force" 1851 /* This will kick in the "brute force"
1855 * iwl3945_hw_reg_comp_txpower_temp() below */ 1852 * iwl3945_hw_reg_comp_txpower_temp() below */
1856 if (!is_temp_calib_needed(priv)) 1853 if (!iwl3945_is_temp_calib_needed(priv))
1857 goto reschedule; 1854 goto reschedule;
1858 1855
1859 /* Set up a new set of temp-adjusted TxPowers, send to NIC. 1856 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
@@ -1900,7 +1897,7 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1900 u8 grp_channel; 1897 u8 grp_channel;
1901 1898
1902 /* Find the group index for the channel ... don't use index 1(?) */ 1899 /* Find the group index for the channel ... don't use index 1(?) */
1903 if (is_channel_a_band(ch_info)) { 1900 if (iwl_legacy_is_channel_a_band(ch_info)) {
1904 for (group = 1; group < 5; group++) { 1901 for (group = 1; group < 5; group++) {
1905 grp_channel = ch_grp[group].group_channel; 1902 grp_channel = ch_grp[group].group_channel;
1906 if (ch_info->channel <= grp_channel) { 1903 if (ch_info->channel <= grp_channel) {
@@ -2080,8 +2077,8 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2080 /* initialize Tx power info for each and every channel, 2.4 and 5.x */ 2077 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2081 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; 2078 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
2082 i++, ch_info++) { 2079 i++, ch_info++) {
2083 a_band = is_channel_a_band(ch_info); 2080 a_band = iwl_legacy_is_channel_a_band(ch_info);
2084 if (!is_channel_valid(ch_info)) 2081 if (!iwl_legacy_is_channel_valid(ch_info))
2085 continue; 2082 continue;
2086 2083
2087 /* find this channel's channel group (*not* "band") index */ 2084 /* find this channel's channel group (*not* "band") index */
@@ -2184,7 +2181,7 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2184{ 2181{
2185 int rc; 2182 int rc;
2186 2183
2187 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); 2184 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2188 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS, 2185 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2189 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 2186 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2190 if (rc < 0) 2187 if (rc < 0)
@@ -2201,10 +2198,10 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2201 2198
2202 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); 2199 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2203 2200
2204 iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); 2201 iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
2205 iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); 2202 iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
2206 2203
2207 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 2204 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
2208 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | 2205 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2209 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | 2206 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2210 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | 2207 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
@@ -2233,7 +2230,8 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2233} 2230}
2234 2231
2235 2232
2236static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) 2233static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
2234 u8 *data)
2237{ 2235{
2238 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data; 2236 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
2239 addsta->mode = cmd->mode; 2237 addsta->mode = cmd->mode;
@@ -2261,7 +2259,7 @@ static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2261 if (sta_id_r) 2259 if (sta_id_r)
2262 *sta_id_r = IWL_INVALID_STATION; 2260 *sta_id_r = IWL_INVALID_STATION;
2263 2261
2264 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); 2262 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2265 if (ret) { 2263 if (ret) {
2266 IWL_ERR(priv, "Unable to add station %pM\n", addr); 2264 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2267 return ret; 2265 return ret;
@@ -2296,7 +2294,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2296 return 0; 2294 return 0;
2297 } 2295 }
2298 2296
2299 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, 2297 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2300 vif->bss_conf.bssid); 2298 vif->bss_conf.bssid);
2301} 2299}
2302 2300
@@ -2347,7 +2345,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2347 * 1M CCK rates */ 2345 * 1M CCK rates */
2348 2346
2349 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 2347 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2350 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2348 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2351 2349
2352 index = IWL_FIRST_CCK_RATE; 2350 index = IWL_FIRST_CCK_RATE;
2353 for (i = IWL_RATE_6M_INDEX_TABLE; 2351 for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2368,14 +2366,14 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2368 2366
2369 /* Update the rate scaling for control frame Tx */ 2367 /* Update the rate scaling for control frame Tx */
2370 rate_cmd.table_id = 0; 2368 rate_cmd.table_id = 0;
2371 rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), 2369 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2372 &rate_cmd); 2370 &rate_cmd);
2373 if (rc) 2371 if (rc)
2374 return rc; 2372 return rc;
2375 2373
2376 /* Update the rate scaling for data frame Tx */ 2374 /* Update the rate scaling for data frame Tx */
2377 rate_cmd.table_id = 1; 2375 rate_cmd.table_id = 1;
2378 return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), 2376 return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2379 &rate_cmd); 2377 &rate_cmd);
2380} 2378}
2381 2379
@@ -2475,11 +2473,11 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
2475 IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); 2473 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
2476 2474
2477 /* verify BSM SRAM contents */ 2475 /* verify BSM SRAM contents */
2478 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); 2476 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
2479 for (reg = BSM_SRAM_LOWER_BOUND; 2477 for (reg = BSM_SRAM_LOWER_BOUND;
2480 reg < BSM_SRAM_LOWER_BOUND + len; 2478 reg < BSM_SRAM_LOWER_BOUND + len;
2481 reg += sizeof(u32), image++) { 2479 reg += sizeof(u32), image++) {
2482 val = iwl_read_prph(priv, reg); 2480 val = iwl_legacy_read_prph(priv, reg);
2483 if (val != le32_to_cpu(*image)) { 2481 if (val != le32_to_cpu(*image)) {
2484 IWL_ERR(priv, "BSM uCode verification failed at " 2482 IWL_ERR(priv, "BSM uCode verification failed at "
2485 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", 2483 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -2512,7 +2510,7 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
2512 */ 2510 */
2513static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) 2511static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
2514{ 2512{
2515 _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); 2513 _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2516 return 0; 2514 return 0;
2517} 2515}
2518 2516
@@ -2583,16 +2581,16 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2583 inst_len = priv->ucode_init.len; 2581 inst_len = priv->ucode_init.len;
2584 data_len = priv->ucode_init_data.len; 2582 data_len = priv->ucode_init_data.len;
2585 2583
2586 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 2584 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2587 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 2585 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2588 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); 2586 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2589 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); 2587 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2590 2588
2591 /* Fill BSM memory with bootstrap instructions */ 2589 /* Fill BSM memory with bootstrap instructions */
2592 for (reg_offset = BSM_SRAM_LOWER_BOUND; 2590 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2593 reg_offset < BSM_SRAM_LOWER_BOUND + len; 2591 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2594 reg_offset += sizeof(u32), image++) 2592 reg_offset += sizeof(u32), image++)
2595 _iwl_write_prph(priv, reg_offset, 2593 _iwl_legacy_write_prph(priv, reg_offset,
2596 le32_to_cpu(*image)); 2594 le32_to_cpu(*image));
2597 2595
2598 rc = iwl3945_verify_bsm(priv); 2596 rc = iwl3945_verify_bsm(priv);
@@ -2600,19 +2598,19 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2600 return rc; 2598 return rc;
2601 2599
2602 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ 2600 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2603 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); 2601 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
2604 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, 2602 iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
2605 IWL39_RTC_INST_LOWER_BOUND); 2603 IWL39_RTC_INST_LOWER_BOUND);
2606 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); 2604 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2607 2605
2608 /* Load bootstrap code into instruction SRAM now, 2606 /* Load bootstrap code into instruction SRAM now,
2609 * to prepare to load "initialize" uCode */ 2607 * to prepare to load "initialize" uCode */
2610 iwl_write_prph(priv, BSM_WR_CTRL_REG, 2608 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2611 BSM_WR_CTRL_REG_BIT_START); 2609 BSM_WR_CTRL_REG_BIT_START);
2612 2610
2613 /* Wait for load of bootstrap uCode to finish */ 2611 /* Wait for load of bootstrap uCode to finish */
2614 for (i = 0; i < 100; i++) { 2612 for (i = 0; i < 100; i++) {
2615 done = iwl_read_prph(priv, BSM_WR_CTRL_REG); 2613 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
2616 if (!(done & BSM_WR_CTRL_REG_BIT_START)) 2614 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2617 break; 2615 break;
2618 udelay(10); 2616 udelay(10);
@@ -2626,7 +2624,7 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2626 2624
2627 /* Enable future boot loads whenever power management unit triggers it 2625 /* Enable future boot loads whenever power management unit triggers it
2628 * (e.g. when powering back up after power-save shutdown) */ 2626 * (e.g. when powering back up after power-save shutdown) */
2629 iwl_write_prph(priv, BSM_WR_CTRL_REG, 2627 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2630 BSM_WR_CTRL_REG_BIT_START_EN); 2628 BSM_WR_CTRL_REG_BIT_START_EN);
2631 2629
2632 return 0; 2630 return 0;
@@ -2635,7 +2633,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2635static struct iwl_hcmd_ops iwl3945_hcmd = { 2633static struct iwl_hcmd_ops iwl3945_hcmd = {
2636 .rxon_assoc = iwl3945_send_rxon_assoc, 2634 .rxon_assoc = iwl3945_send_rxon_assoc,
2637 .commit_rxon = iwl3945_commit_rxon, 2635 .commit_rxon = iwl3945_commit_rxon,
2638 .send_bt_config = iwl_send_bt_config,
2639}; 2636};
2640 2637
2641static struct iwl_lib_ops iwl3945_lib = { 2638static struct iwl_lib_ops iwl3945_lib = {
@@ -2661,13 +2658,9 @@ static struct iwl_lib_ops iwl3945_lib = {
2661 }, 2658 },
2662 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, 2659 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
2663 .release_semaphore = iwl3945_eeprom_release_semaphore, 2660 .release_semaphore = iwl3945_eeprom_release_semaphore,
2664 .query_addr = iwlcore_eeprom_query_addr,
2665 }, 2661 },
2666 .send_tx_power = iwl3945_send_tx_power, 2662 .send_tx_power = iwl3945_send_tx_power,
2667 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, 2663 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
2668 .isr_ops = {
2669 .isr = iwl_isr_legacy,
2670 },
2671 2664
2672 .debugfs_ops = { 2665 .debugfs_ops = {
2673 .rx_stats_read = iwl3945_ucode_rx_stats_read, 2666 .rx_stats_read = iwl3945_ucode_rx_stats_read,
@@ -2685,7 +2678,6 @@ static const struct iwl_legacy_ops iwl3945_legacy_ops = {
2685static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2678static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2686 .get_hcmd_size = iwl3945_get_hcmd_size, 2679 .get_hcmd_size = iwl3945_get_hcmd_size,
2687 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2680 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2688 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2689 .request_scan = iwl3945_request_scan, 2681 .request_scan = iwl3945_request_scan,
2690 .post_scan = iwl3945_post_scan, 2682 .post_scan = iwl3945_post_scan,
2691}; 2683};
@@ -2705,13 +2697,10 @@ static struct iwl_base_params iwl3945_base_params = {
2705 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, 2697 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2706 .set_l0s = false, 2698 .set_l0s = false,
2707 .use_bsm = true, 2699 .use_bsm = true,
2708 .use_isr_legacy = true,
2709 .led_compensation = 64, 2700 .led_compensation = 64,
2710 .broken_powersave = true,
2711 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 2701 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2712 .wd_timeout = IWL_DEF_WD_TIMEOUT, 2702 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2713 .max_event_log_size = 512, 2703 .max_event_log_size = 512,
2714 .tx_power_by_driver = true,
2715}; 2704};
2716 2705
2717static struct iwl_cfg iwl3945_bg_cfg = { 2706static struct iwl_cfg iwl3945_bg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
index 3eef1eb74a78..b118b59b71de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ struct iwl3945_rs_sta {
108 108
109/* 109/*
110 * The common struct MUST be first because it is shared between 110 * The common struct MUST be first because it is shared between
111 * 3945 and agn! 111 * 3945 and 4965!
112 */ 112 */
113struct iwl3945_sta_priv { 113struct iwl3945_sta_priv {
114 struct iwl_station_priv_common common; 114 struct iwl_station_priv_common common;
@@ -201,7 +201,7 @@ struct iwl3945_ibss_seq {
201 201
202/****************************************************************************** 202/******************************************************************************
203 * 203 *
204 * Functions implemented in iwl-base.c which are forward declared here 204 * Functions implemented in iwl3945-base.c which are forward declared here
205 * for use by iwl-*.c 205 * for use by iwl-*.c
206 * 206 *
207 *****************************************************************************/ 207 *****************************************************************************/
@@ -209,7 +209,7 @@ extern int iwl3945_calc_db_from_ratio(int sig_ratio);
209extern void iwl3945_rx_replenish(void *data); 209extern void iwl3945_rx_replenish(void *data);
210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
212 struct ieee80211_hdr *hdr,int left); 212 struct ieee80211_hdr *hdr, int left);
213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
214 char **buf, bool display); 214 char **buf, bool display);
215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); 215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
@@ -217,7 +217,7 @@ extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
217/****************************************************************************** 217/******************************************************************************
218 * 218 *
219 * Functions implemented in iwl-[34]*.c which are forward declared here 219 * Functions implemented in iwl-[34]*.c which are forward declared here
220 * for use by iwl-base.c 220 * for use by iwl3945-base.c
221 * 221 *
222 * NOTE: The implementation of these functions are hardware specific 222 * NOTE: The implementation of these functions are hardware specific
223 * which is why they are in the hardware specific files (vs. iwl-base.c) 223 * which is why they are in the hardware specific files (vs. iwl-base.c)
@@ -283,7 +283,7 @@ extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
283extern struct ieee80211_ops iwl3945_hw_ops; 283extern struct ieee80211_ops iwl3945_hw_ops;
284 284
285/* 285/*
286 * Forward declare iwl-3945.c functions for iwl-base.c 286 * Forward declare iwl-3945.c functions for iwl3945-base.c
287 */ 287 */
288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); 288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); 289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644
index 000000000000..81d6a25eb04f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
@@ -0,0 +1,967 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-4965-calib.h"
69
70/*****************************************************************************
71 * INIT calibrations framework
72 *****************************************************************************/
73
74struct statistics_general_data {
75 u32 beacon_silence_rssi_a;
76 u32 beacon_silence_rssi_b;
77 u32 beacon_silence_rssi_c;
78 u32 beacon_energy_a;
79 u32 beacon_energy_b;
80 u32 beacon_energy_c;
81};
82
83void iwl4965_calib_free_results(struct iwl_priv *priv)
84{
85 int i;
86
87 for (i = 0; i < IWL_CALIB_MAX; i++) {
88 kfree(priv->calib_results[i].buf);
89 priv->calib_results[i].buf = NULL;
90 priv->calib_results[i].buf_len = 0;
91 }
92}
93
94/*****************************************************************************
95 * RUNTIME calibrations framework
96 *****************************************************************************/
97
98/* "false alarms" are signals that our DSP tries to lock onto,
99 * but then determines that they are either noise, or transmissions
100 * from a distant wireless network (also "noise", really) that get
101 * "stepped on" by stronger transmissions within our own network.
102 * This algorithm attempts to set a sensitivity level that is high
103 * enough to receive all of our own network traffic, but not so
104 * high that our DSP gets too busy trying to lock onto non-network
105 * activity/noise. */
106static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
107 u32 norm_fa,
108 u32 rx_enable_time,
109 struct statistics_general_data *rx_info)
110{
111 u32 max_nrg_cck = 0;
112 int i = 0;
113 u8 max_silence_rssi = 0;
114 u32 silence_ref = 0;
115 u8 silence_rssi_a = 0;
116 u8 silence_rssi_b = 0;
117 u8 silence_rssi_c = 0;
118 u32 val;
119
120 /* "false_alarms" values below are cross-multiplications to assess the
121 * numbers of false alarms within the measured period of actual Rx
122 * (Rx is off when we're txing), vs the min/max expected false alarms
123 * (some should be expected if rx is sensitive enough) in a
124 * hypothetical listening period of 200 time units (TU), 204.8 msec:
125 *
126 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
127 *
128 * */
129 u32 false_alarms = norm_fa * 200 * 1024;
130 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
131 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
132 struct iwl_sensitivity_data *data = NULL;
133 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
134
135 data = &(priv->sensitivity_data);
136
137 data->nrg_auto_corr_silence_diff = 0;
138
139 /* Find max silence rssi among all 3 receivers.
140 * This is background noise, which may include transmissions from other
141 * networks, measured during silence before our network's beacon */
142 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
143 ALL_BAND_FILTER) >> 8);
144 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
145 ALL_BAND_FILTER) >> 8);
146 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
147 ALL_BAND_FILTER) >> 8);
148
149 val = max(silence_rssi_b, silence_rssi_c);
150 max_silence_rssi = max(silence_rssi_a, (u8) val);
151
152 /* Store silence rssi in 20-beacon history table */
153 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
154 data->nrg_silence_idx++;
155 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
156 data->nrg_silence_idx = 0;
157
158 /* Find max silence rssi across 20 beacon history */
159 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
160 val = data->nrg_silence_rssi[i];
161 silence_ref = max(silence_ref, val);
162 }
163 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
164 silence_rssi_a, silence_rssi_b, silence_rssi_c,
165 silence_ref);
166
167 /* Find max rx energy (min value!) among all 3 receivers,
168 * measured during beacon frame.
169 * Save it in 10-beacon history table. */
170 i = data->nrg_energy_idx;
171 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
172 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
173
174 data->nrg_energy_idx++;
175 if (data->nrg_energy_idx >= 10)
176 data->nrg_energy_idx = 0;
177
178 /* Find min rx energy (max value) across 10 beacon history.
179 * This is the minimum signal level that we want to receive well.
180 * Add backoff (margin so we don't miss slightly lower energy frames).
181 * This establishes an upper bound (min value) for energy threshold. */
182 max_nrg_cck = data->nrg_value[0];
183 for (i = 1; i < 10; i++)
184 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
185 max_nrg_cck += 6;
186
187 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
188 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
189 rx_info->beacon_energy_c, max_nrg_cck - 6);
190
191 /* Count number of consecutive beacons with fewer-than-desired
192 * false alarms. */
193 if (false_alarms < min_false_alarms)
194 data->num_in_cck_no_fa++;
195 else
196 data->num_in_cck_no_fa = 0;
197 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
198 data->num_in_cck_no_fa);
199
200 /* If we got too many false alarms this time, reduce sensitivity */
201 if ((false_alarms > max_false_alarms) &&
202 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
203 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
204 false_alarms, max_false_alarms);
205 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
206 data->nrg_curr_state = IWL_FA_TOO_MANY;
207 /* Store for "fewer than desired" on later beacon */
208 data->nrg_silence_ref = silence_ref;
209
210 /* increase energy threshold (reduce nrg value)
211 * to decrease sensitivity */
212 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
213 /* Else if we got fewer than desired, increase sensitivity */
214 } else if (false_alarms < min_false_alarms) {
215 data->nrg_curr_state = IWL_FA_TOO_FEW;
216
217 /* Compare silence level with silence level for most recent
218 * healthy number or too many false alarms */
219 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
220 (s32)silence_ref;
221
222 IWL_DEBUG_CALIB(priv,
223 "norm FA %u < min FA %u, silence diff %d\n",
224 false_alarms, min_false_alarms,
225 data->nrg_auto_corr_silence_diff);
226
227 /* Increase value to increase sensitivity, but only if:
228 * 1a) previous beacon did *not* have *too many* false alarms
229 * 1b) AND there's a significant difference in Rx levels
230 * from a previous beacon with too many, or healthy # FAs
231 * OR 2) We've seen a lot of beacons (100) with too few
232 * false alarms */
233 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
234 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
235 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
236
237 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
238 /* Increase nrg value to increase sensitivity */
239 val = data->nrg_th_cck + NRG_STEP_CCK;
240 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
241 } else {
242 IWL_DEBUG_CALIB(priv,
243 "... but not changing sensitivity\n");
244 }
245
246 /* Else we got a healthy number of false alarms, keep status quo */
247 } else {
248 IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
249 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
250
251 /* Store for use in "fewer than desired" with later beacon */
252 data->nrg_silence_ref = silence_ref;
253
254 /* If previous beacon had too many false alarms,
255 * give it some extra margin by reducing sensitivity again
256 * (but don't go below measured energy of desired Rx) */
257 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
258 IWL_DEBUG_CALIB(priv, "... increasing margin\n");
259 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
260 data->nrg_th_cck -= NRG_MARGIN;
261 else
262 data->nrg_th_cck = max_nrg_cck;
263 }
264 }
265
266 /* Make sure the energy threshold does not go above the measured
267 * energy of the desired Rx signals (reduced by backoff margin),
268 * or else we might start missing Rx frames.
269 * Lower value is higher energy, so we use max()!
270 */
271 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
272 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
273
274 data->nrg_prev_state = data->nrg_curr_state;
275
276 /* Auto-correlation CCK algorithm */
277 if (false_alarms > min_false_alarms) {
278
279 /* increase auto_corr values to decrease sensitivity
280 * so the DSP won't be disturbed by the noise
281 */
282 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
283 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
284 else {
285 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
286 data->auto_corr_cck =
287 min((u32)ranges->auto_corr_max_cck, val);
288 }
289 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
290 data->auto_corr_cck_mrc =
291 min((u32)ranges->auto_corr_max_cck_mrc, val);
292 } else if ((false_alarms < min_false_alarms) &&
293 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
294 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
295
296 /* Decrease auto_corr values to increase sensitivity */
297 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
298 data->auto_corr_cck =
299 max((u32)ranges->auto_corr_min_cck, val);
300 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
301 data->auto_corr_cck_mrc =
302 max((u32)ranges->auto_corr_min_cck_mrc, val);
303 }
304
305 return 0;
306}
307
308
309static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
310 u32 norm_fa,
311 u32 rx_enable_time)
312{
313 u32 val;
314 u32 false_alarms = norm_fa * 200 * 1024;
315 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
316 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
317 struct iwl_sensitivity_data *data = NULL;
318 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
319
320 data = &(priv->sensitivity_data);
321
322 /* If we got too many false alarms this time, reduce sensitivity */
323 if (false_alarms > max_false_alarms) {
324
325 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
326 false_alarms, max_false_alarms);
327
328 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
329 data->auto_corr_ofdm =
330 min((u32)ranges->auto_corr_max_ofdm, val);
331
332 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm_mrc =
334 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
335
336 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_x1 =
338 min((u32)ranges->auto_corr_max_ofdm_x1, val);
339
340 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_mrc_x1 =
342 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
343 }
344
345 /* Else if we got fewer than desired, increase sensitivity */
346 else if (false_alarms < min_false_alarms) {
347
348 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
349 false_alarms, min_false_alarms);
350
351 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
352 data->auto_corr_ofdm =
353 max((u32)ranges->auto_corr_min_ofdm, val);
354
355 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
356 data->auto_corr_ofdm_mrc =
357 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
358
359 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
360 data->auto_corr_ofdm_x1 =
361 max((u32)ranges->auto_corr_min_ofdm_x1, val);
362
363 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
364 data->auto_corr_ofdm_mrc_x1 =
365 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
366 } else {
367 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
368 min_false_alarms, false_alarms, max_false_alarms);
369 }
370 return 0;
371}
372
373static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
374 struct iwl_sensitivity_data *data,
375 __le16 *tbl)
376{
377 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
378 cpu_to_le16((u16)data->auto_corr_ofdm);
379 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
380 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
381 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
382 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
383 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
384 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
385
386 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
387 cpu_to_le16((u16)data->auto_corr_cck);
388 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
389 cpu_to_le16((u16)data->auto_corr_cck_mrc);
390
391 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
392 cpu_to_le16((u16)data->nrg_th_cck);
393 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
394 cpu_to_le16((u16)data->nrg_th_ofdm);
395
396 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
397 cpu_to_le16(data->barker_corr_th_min);
398 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
399 cpu_to_le16(data->barker_corr_th_min_mrc);
400 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
401 cpu_to_le16(data->nrg_th_cca);
402
403 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
404 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
405 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
406 data->nrg_th_ofdm);
407
408 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
409 data->auto_corr_cck, data->auto_corr_cck_mrc,
410 data->nrg_th_cck);
411}
412
413/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
414static int iwl4965_sensitivity_write(struct iwl_priv *priv)
415{
416 struct iwl_sensitivity_cmd cmd;
417 struct iwl_sensitivity_data *data = NULL;
418 struct iwl_host_cmd cmd_out = {
419 .id = SENSITIVITY_CMD,
420 .len = sizeof(struct iwl_sensitivity_cmd),
421 .flags = CMD_ASYNC,
422 .data = &cmd,
423 };
424
425 data = &(priv->sensitivity_data);
426
427 memset(&cmd, 0, sizeof(cmd));
428
429 iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
430
431 /* Update uCode's "work" table, and copy it to DSP */
432 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
433
434 /* Don't send command to uCode if nothing has changed */
435 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
436 sizeof(u16)*HD_TABLE_SIZE)) {
437 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
438 return 0;
439 }
440
441 /* Copy table for comparison next time */
442 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
443 sizeof(u16)*HD_TABLE_SIZE);
444
445 return iwl_legacy_send_cmd(priv, &cmd_out);
446}
447
448void iwl4965_init_sensitivity(struct iwl_priv *priv)
449{
450 int ret = 0;
451 int i;
452 struct iwl_sensitivity_data *data = NULL;
453 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
454
455 if (priv->disable_sens_cal)
456 return;
457
458 IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
459
460 /* Clear driver's sensitivity algo data */
461 data = &(priv->sensitivity_data);
462
463 if (ranges == NULL)
464 return;
465
466 memset(data, 0, sizeof(struct iwl_sensitivity_data));
467
468 data->num_in_cck_no_fa = 0;
469 data->nrg_curr_state = IWL_FA_TOO_MANY;
470 data->nrg_prev_state = IWL_FA_TOO_MANY;
471 data->nrg_silence_ref = 0;
472 data->nrg_silence_idx = 0;
473 data->nrg_energy_idx = 0;
474
475 for (i = 0; i < 10; i++)
476 data->nrg_value[i] = 0;
477
478 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
479 data->nrg_silence_rssi[i] = 0;
480
481 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
482 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
483 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
484 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
485 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
486 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
487 data->nrg_th_cck = ranges->nrg_th_cck;
488 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
489 data->barker_corr_th_min = ranges->barker_corr_th_min;
490 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
491 data->nrg_th_cca = ranges->nrg_th_cca;
492
493 data->last_bad_plcp_cnt_ofdm = 0;
494 data->last_fa_cnt_ofdm = 0;
495 data->last_bad_plcp_cnt_cck = 0;
496 data->last_fa_cnt_cck = 0;
497
498 ret |= iwl4965_sensitivity_write(priv);
499 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
500}
501
502void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
503{
504 u32 rx_enable_time;
505 u32 fa_cck;
506 u32 fa_ofdm;
507 u32 bad_plcp_cck;
508 u32 bad_plcp_ofdm;
509 u32 norm_fa_ofdm;
510 u32 norm_fa_cck;
511 struct iwl_sensitivity_data *data = NULL;
512 struct statistics_rx_non_phy *rx_info;
513 struct statistics_rx_phy *ofdm, *cck;
514 unsigned long flags;
515 struct statistics_general_data statis;
516
517 if (priv->disable_sens_cal)
518 return;
519
520 data = &(priv->sensitivity_data);
521
522 if (!iwl_legacy_is_any_associated(priv)) {
523 IWL_DEBUG_CALIB(priv, "<< - not associated\n");
524 return;
525 }
526
527 spin_lock_irqsave(&priv->lock, flags);
528
529 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
530 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
531 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
532
533 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
534 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
535 spin_unlock_irqrestore(&priv->lock, flags);
536 return;
537 }
538
539 /* Extract Statistics: */
540 rx_enable_time = le32_to_cpu(rx_info->channel_load);
541 fa_cck = le32_to_cpu(cck->false_alarm_cnt);
542 fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
543 bad_plcp_cck = le32_to_cpu(cck->plcp_err);
544 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
545
546 statis.beacon_silence_rssi_a =
547 le32_to_cpu(rx_info->beacon_silence_rssi_a);
548 statis.beacon_silence_rssi_b =
549 le32_to_cpu(rx_info->beacon_silence_rssi_b);
550 statis.beacon_silence_rssi_c =
551 le32_to_cpu(rx_info->beacon_silence_rssi_c);
552 statis.beacon_energy_a =
553 le32_to_cpu(rx_info->beacon_energy_a);
554 statis.beacon_energy_b =
555 le32_to_cpu(rx_info->beacon_energy_b);
556 statis.beacon_energy_c =
557 le32_to_cpu(rx_info->beacon_energy_c);
558
559 spin_unlock_irqrestore(&priv->lock, flags);
560
561 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
562
563 if (!rx_enable_time) {
564 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
565 return;
566 }
567
568 /* These statistics increase monotonically, and do not reset
569 * at each beacon. Calculate difference from last value, or just
570 * use the new statistics value if it has reset or wrapped around. */
571 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
572 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
573 else {
574 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
575 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
576 }
577
578 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
579 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
580 else {
581 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
582 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
583 }
584
585 if (data->last_fa_cnt_ofdm > fa_ofdm)
586 data->last_fa_cnt_ofdm = fa_ofdm;
587 else {
588 fa_ofdm -= data->last_fa_cnt_ofdm;
589 data->last_fa_cnt_ofdm += fa_ofdm;
590 }
591
592 if (data->last_fa_cnt_cck > fa_cck)
593 data->last_fa_cnt_cck = fa_cck;
594 else {
595 fa_cck -= data->last_fa_cnt_cck;
596 data->last_fa_cnt_cck += fa_cck;
597 }
598
599 /* Total aborted signal locks */
600 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
601 norm_fa_cck = fa_cck + bad_plcp_cck;
602
603 IWL_DEBUG_CALIB(priv,
604 "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
605 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
606
607 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
608 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
609
610 iwl4965_sensitivity_write(priv);
611}
612
613static inline u8 iwl4965_find_first_chain(u8 mask)
614{
615 if (mask & ANT_A)
616 return CHAIN_A;
617 if (mask & ANT_B)
618 return CHAIN_B;
619 return CHAIN_C;
620}
621
622/**
623 * Run disconnected antenna algorithm to find out which antennas are
624 * disconnected.
625 */
626static void
627iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
628 struct iwl_chain_noise_data *data)
629{
630 u32 active_chains = 0;
631 u32 max_average_sig;
632 u16 max_average_sig_antenna_i;
633 u8 num_tx_chains;
634 u8 first_chain;
635 u16 i = 0;
636
637 average_sig[0] = data->chain_signal_a /
638 priv->cfg->base_params->chain_noise_num_beacons;
639 average_sig[1] = data->chain_signal_b /
640 priv->cfg->base_params->chain_noise_num_beacons;
641 average_sig[2] = data->chain_signal_c /
642 priv->cfg->base_params->chain_noise_num_beacons;
643
644 if (average_sig[0] >= average_sig[1]) {
645 max_average_sig = average_sig[0];
646 max_average_sig_antenna_i = 0;
647 active_chains = (1 << max_average_sig_antenna_i);
648 } else {
649 max_average_sig = average_sig[1];
650 max_average_sig_antenna_i = 1;
651 active_chains = (1 << max_average_sig_antenna_i);
652 }
653
654 if (average_sig[2] >= max_average_sig) {
655 max_average_sig = average_sig[2];
656 max_average_sig_antenna_i = 2;
657 active_chains = (1 << max_average_sig_antenna_i);
658 }
659
660 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
661 average_sig[0], average_sig[1], average_sig[2]);
662 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
663 max_average_sig, max_average_sig_antenna_i);
664
665 /* Compare signal strengths for all 3 receivers. */
666 for (i = 0; i < NUM_RX_CHAINS; i++) {
667 if (i != max_average_sig_antenna_i) {
668 s32 rssi_delta = (max_average_sig - average_sig[i]);
669
670 /* If signal is very weak, compared with
671 * strongest, mark it as disconnected. */
672 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
673 data->disconn_array[i] = 1;
674 else
675 active_chains |= (1 << i);
676 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
677 "disconn_array[i] = %d\n",
678 i, rssi_delta, data->disconn_array[i]);
679 }
680 }
681
682 /*
683 * The above algorithm sometimes fails when the ucode
684 * reports 0 for all chains. It's not clear why that
685 * happens to start with, but it is then causing trouble
686 * because this can make us enable more chains than the
687 * hardware really has.
688 *
689 * To be safe, simply mask out any chains that we know
690 * are not on the device.
691 */
692 active_chains &= priv->hw_params.valid_rx_ant;
693
694 num_tx_chains = 0;
695 for (i = 0; i < NUM_RX_CHAINS; i++) {
696 /* loops on all the bits of
697 * priv->hw_setting.valid_tx_ant */
698 u8 ant_msk = (1 << i);
699 if (!(priv->hw_params.valid_tx_ant & ant_msk))
700 continue;
701
702 num_tx_chains++;
703 if (data->disconn_array[i] == 0)
704 /* there is a Tx antenna connected */
705 break;
706 if (num_tx_chains == priv->hw_params.tx_chains_num &&
707 data->disconn_array[i]) {
708 /*
709 * If all chains are disconnected
710 * connect the first valid tx chain
711 */
712 first_chain =
713 iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
714 data->disconn_array[first_chain] = 0;
715 active_chains |= BIT(first_chain);
716 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \
717 W/A - declare %d as connected\n",
718 first_chain);
719 break;
720 }
721 }
722
723 if (active_chains != priv->hw_params.valid_rx_ant &&
724 active_chains != priv->chain_noise_data.active_chains)
725 IWL_DEBUG_CALIB(priv,
726 "Detected that not all antennas are connected! "
727 "Connected: %#x, valid: %#x.\n",
728 active_chains, priv->hw_params.valid_rx_ant);
729
730 /* Save for use within RXON, TX, SCAN commands, etc. */
731 data->active_chains = active_chains;
732 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
733 active_chains);
734}
735
736static void iwl4965_gain_computation(struct iwl_priv *priv,
737 u32 *average_noise,
738 u16 min_average_noise_antenna_i,
739 u32 min_average_noise,
740 u8 default_chain)
741{
742 int i, ret;
743 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
744
745 data->delta_gain_code[min_average_noise_antenna_i] = 0;
746
747 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
748 s32 delta_g = 0;
749
750 if (!(data->disconn_array[i]) &&
751 (data->delta_gain_code[i] ==
752 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
753 delta_g = average_noise[i] - min_average_noise;
754 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
755 data->delta_gain_code[i] =
756 min(data->delta_gain_code[i],
757 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
758
759 data->delta_gain_code[i] =
760 (data->delta_gain_code[i] | (1 << 2));
761 } else {
762 data->delta_gain_code[i] = 0;
763 }
764 }
765 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
766 data->delta_gain_code[0],
767 data->delta_gain_code[1],
768 data->delta_gain_code[2]);
769
770 /* Differential gain gets sent to uCode only once */
771 if (!data->radio_write) {
772 struct iwl_calib_diff_gain_cmd cmd;
773 data->radio_write = 1;
774
775 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
777 cmd.diff_gain_a = data->delta_gain_code[0];
778 cmd.diff_gain_b = data->delta_gain_code[1];
779 cmd.diff_gain_c = data->delta_gain_code[2];
780 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
781 sizeof(cmd), &cmd);
782 if (ret)
783 IWL_DEBUG_CALIB(priv, "fail sending cmd "
784 "REPLY_PHY_CALIBRATION_CMD\n");
785
786 /* TODO we might want recalculate
787 * rx_chain in rxon cmd */
788
789 /* Mark so we run this algo only once! */
790 data->state = IWL_CHAIN_NOISE_CALIBRATED;
791 }
792}
793
794
795
796/*
797 * Accumulate 16 beacons of signal and noise statistics for each of
798 * 3 receivers/antennas/rx-chains, then figure out:
799 * 1) Which antennas are connected.
800 * 2) Differential rx gain settings to balance the 3 receivers.
801 */
802void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
803{
804 struct iwl_chain_noise_data *data = NULL;
805
806 u32 chain_noise_a;
807 u32 chain_noise_b;
808 u32 chain_noise_c;
809 u32 chain_sig_a;
810 u32 chain_sig_b;
811 u32 chain_sig_c;
812 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
813 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
814 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
815 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
816 u16 i = 0;
817 u16 rxon_chnum = INITIALIZATION_VALUE;
818 u16 stat_chnum = INITIALIZATION_VALUE;
819 u8 rxon_band24;
820 u8 stat_band24;
821 unsigned long flags;
822 struct statistics_rx_non_phy *rx_info;
823
824 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
825
826 if (priv->disable_chain_noise_cal)
827 return;
828
829 data = &(priv->chain_noise_data);
830
831 /*
832 * Accumulate just the first "chain_noise_num_beacons" after
833 * the first association, then we're done forever.
834 */
835 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
836 if (data->state == IWL_CHAIN_NOISE_ALIVE)
837 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
838 return;
839 }
840
841 spin_lock_irqsave(&priv->lock, flags);
842
843 rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
844 rx.general);
845
846 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
847 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
848 spin_unlock_irqrestore(&priv->lock, flags);
849 return;
850 }
851
852 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
853 rxon_chnum = le16_to_cpu(ctx->staging.channel);
854
855 stat_band24 = !!(((struct iwl_notif_statistics *)
856 stat_resp)->flag &
857 STATISTICS_REPLY_FLG_BAND_24G_MSK);
858 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
859 stat_resp)->flag) >> 16;
860
861 /* Make sure we accumulate data for just the associated channel
862 * (even if scanning). */
863 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
864 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
865 rxon_chnum, rxon_band24);
866 spin_unlock_irqrestore(&priv->lock, flags);
867 return;
868 }
869
870 /*
871 * Accumulate beacon statistics values across
872 * "chain_noise_num_beacons"
873 */
874 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
875 IN_BAND_FILTER;
876 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
877 IN_BAND_FILTER;
878 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
879 IN_BAND_FILTER;
880
881 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
882 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
883 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
884
885 spin_unlock_irqrestore(&priv->lock, flags);
886
887 data->beacon_count++;
888
889 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
890 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
891 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
892
893 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
894 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
895 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
896
897 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
898 rxon_chnum, rxon_band24, data->beacon_count);
899 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
900 chain_sig_a, chain_sig_b, chain_sig_c);
901 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
902 chain_noise_a, chain_noise_b, chain_noise_c);
903
904 /* If this is the "chain_noise_num_beacons", determine:
905 * 1) Disconnected antennas (using signal strengths)
906 * 2) Differential gain (using silence noise) to balance receivers */
907 if (data->beacon_count !=
908 priv->cfg->base_params->chain_noise_num_beacons)
909 return;
910
911 /* Analyze signal for disconnected antenna */
912 iwl4965_find_disconn_antenna(priv, average_sig, data);
913
914 /* Analyze noise for rx balance */
915 average_noise[0] = data->chain_noise_a /
916 priv->cfg->base_params->chain_noise_num_beacons;
917 average_noise[1] = data->chain_noise_b /
918 priv->cfg->base_params->chain_noise_num_beacons;
919 average_noise[2] = data->chain_noise_c /
920 priv->cfg->base_params->chain_noise_num_beacons;
921
922 for (i = 0; i < NUM_RX_CHAINS; i++) {
923 if (!(data->disconn_array[i]) &&
924 (average_noise[i] <= min_average_noise)) {
925 /* This means that chain i is active and has
926 * lower noise values so far: */
927 min_average_noise = average_noise[i];
928 min_average_noise_antenna_i = i;
929 }
930 }
931
932 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
933 average_noise[0], average_noise[1],
934 average_noise[2]);
935
936 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
937 min_average_noise, min_average_noise_antenna_i);
938
939 iwl4965_gain_computation(priv, average_noise,
940 min_average_noise_antenna_i, min_average_noise,
941 iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
942
943 /* Some power changes may have been made during the calibration.
944 * Update and commit the RXON
945 */
946 if (priv->cfg->ops->lib->update_chain_flags)
947 priv->cfg->ops->lib->update_chain_flags(priv);
948
949 data->state = IWL_CHAIN_NOISE_DONE;
950 iwl_legacy_power_update_mode(priv, false);
951}
952
953void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
954{
955 int i;
956 memset(&(priv->sensitivity_data), 0,
957 sizeof(struct iwl_sensitivity_data));
958 memset(&(priv->chain_noise_data), 0,
959 sizeof(struct iwl_chain_noise_data));
960 for (i = 0; i < NUM_RX_CHAINS; i++)
961 priv->chain_noise_data.delta_gain_code[i] =
962 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
963
964 /* Ask for statistics now, the uCode will send notification
965 * periodically after association */
966 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
967}
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
index 9f7b2f935964..f46c80e6e005 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -59,21 +59,17 @@
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/ 61 *****************************************************************************/
62#ifndef __iwl_4965_calib_h__
63#define __iwl_4965_calib_h__
62 64
63#ifndef __iwl_legacy_h__ 65#include "iwl-dev.h"
64#define __iwl_legacy_h__ 66#include "iwl-core.h"
67#include "iwl-commands.h"
65 68
66/* mac80211 handlers */ 69void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
67int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed); 70void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
68void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw); 71void iwl4965_init_sensitivity(struct iwl_priv *priv);
69void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, 72void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
70 struct ieee80211_vif *vif, 73void iwl4965_calib_free_results(struct iwl_priv *priv);
71 struct ieee80211_bss_conf *bss_conf,
72 u32 changes);
73void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
74 struct ieee80211_tx_info *info,
75 __le16 fc, __le32 *tx_flags);
76 74
77irqreturn_t iwl_isr_legacy(int irq, void *data); 75#endif /* __iwl_4965_calib_h__ */
78
79#endif /* __iwl_legacy_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644
index 000000000000..1c93665766e4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
@@ -0,0 +1,774 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-4965.h"
29#include "iwl-4965-debugfs.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
35
36static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
37{
38 int p = 0;
39 u32 flag;
40
41 flag = le32_to_cpu(priv->_4965.statistics.flag);
42
43 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
44 if (flag & UCODE_STATISTICS_CLEAR_MSK)
45 p += scnprintf(buf + p, bufsz - p,
46 "\tStatistics have been cleared\n");
47 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
48 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
49 ? "2.4 GHz" : "5.2 GHz");
50 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
51 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
52 ? "enabled" : "disabled");
53
54 return p;
55}
56
57ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 size_t count, loff_t *ppos)
59{
60 struct iwl_priv *priv = file->private_data;
61 int pos = 0;
62 char *buf;
63 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
64 sizeof(struct statistics_rx_non_phy) * 40 +
65 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
66 ssize_t ret;
67 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
68 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
69 struct statistics_rx_non_phy *general, *accum_general;
70 struct statistics_rx_non_phy *delta_general, *max_general;
71 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
72
73 if (!iwl_legacy_is_alive(priv))
74 return -EAGAIN;
75
76 buf = kzalloc(bufsz, GFP_KERNEL);
77 if (!buf) {
78 IWL_ERR(priv, "Can not allocate Buffer\n");
79 return -ENOMEM;
80 }
81
82 /*
83 * the statistic information display here is based on
84 * the last statistics notification from uCode
85 * might not reflect the current uCode activity
86 */
87 ofdm = &priv->_4965.statistics.rx.ofdm;
88 cck = &priv->_4965.statistics.rx.cck;
89 general = &priv->_4965.statistics.rx.general;
90 ht = &priv->_4965.statistics.rx.ofdm_ht;
91 accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
92 accum_cck = &priv->_4965.accum_statistics.rx.cck;
93 accum_general = &priv->_4965.accum_statistics.rx.general;
94 accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
95 delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
96 delta_cck = &priv->_4965.delta_statistics.rx.cck;
97 delta_general = &priv->_4965.delta_statistics.rx.general;
98 delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
99 max_ofdm = &priv->_4965.max_delta.rx.ofdm;
100 max_cck = &priv->_4965.max_delta.rx.cck;
101 max_general = &priv->_4965.max_delta.rx.general;
102 max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
103
104 pos += iwl4965_statistics_flag(priv, buf, bufsz);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 fmt_header, "Statistics_Rx - OFDM:");
107 pos += scnprintf(buf + pos, bufsz - pos,
108 fmt_table, "ina_cnt:",
109 le32_to_cpu(ofdm->ina_cnt),
110 accum_ofdm->ina_cnt,
111 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 fmt_table, "fina_cnt:",
114 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
115 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
116 pos += scnprintf(buf + pos, bufsz - pos,
117 fmt_table, "plcp_err:",
118 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
119 delta_ofdm->plcp_err, max_ofdm->plcp_err);
120 pos += scnprintf(buf + pos, bufsz - pos,
121 fmt_table, "crc32_err:",
122 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
123 delta_ofdm->crc32_err, max_ofdm->crc32_err);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 fmt_table, "overrun_err:",
126 le32_to_cpu(ofdm->overrun_err),
127 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
128 max_ofdm->overrun_err);
129 pos += scnprintf(buf + pos, bufsz - pos,
130 fmt_table, "early_overrun_err:",
131 le32_to_cpu(ofdm->early_overrun_err),
132 accum_ofdm->early_overrun_err,
133 delta_ofdm->early_overrun_err,
134 max_ofdm->early_overrun_err);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 fmt_table, "crc32_good:",
137 le32_to_cpu(ofdm->crc32_good),
138 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
139 max_ofdm->crc32_good);
140 pos += scnprintf(buf + pos, bufsz - pos,
141 fmt_table, "false_alarm_cnt:",
142 le32_to_cpu(ofdm->false_alarm_cnt),
143 accum_ofdm->false_alarm_cnt,
144 delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos += scnprintf(buf + pos, bufsz - pos,
153 fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout),
155 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
156 max_ofdm->sfd_timeout);
157 pos += scnprintf(buf + pos, bufsz - pos,
158 fmt_table, "fina_timeout:",
159 le32_to_cpu(ofdm->fina_timeout),
160 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 fmt_table, "unresponded_rts:",
164 le32_to_cpu(ofdm->unresponded_rts),
165 accum_ofdm->unresponded_rts,
166 delta_ofdm->unresponded_rts,
167 max_ofdm->unresponded_rts);
168 pos += scnprintf(buf + pos, bufsz - pos,
169 fmt_table, "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos += scnprintf(buf + pos, bufsz - pos,
175 fmt_table, "sent_ack_cnt:",
176 le32_to_cpu(ofdm->sent_ack_cnt),
177 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
178 max_ofdm->sent_ack_cnt);
179 pos += scnprintf(buf + pos, bufsz - pos,
180 fmt_table, "sent_cts_cnt:",
181 le32_to_cpu(ofdm->sent_cts_cnt),
182 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
183 max_ofdm->sent_cts_cnt);
184 pos += scnprintf(buf + pos, bufsz - pos,
185 fmt_table, "sent_ba_rsp_cnt:",
186 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
187 accum_ofdm->sent_ba_rsp_cnt,
188 delta_ofdm->sent_ba_rsp_cnt,
189 max_ofdm->sent_ba_rsp_cnt);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 fmt_table, "dsp_self_kill:",
192 le32_to_cpu(ofdm->dsp_self_kill),
193 accum_ofdm->dsp_self_kill,
194 delta_ofdm->dsp_self_kill,
195 max_ofdm->dsp_self_kill);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 fmt_table, "mh_format_err:",
198 le32_to_cpu(ofdm->mh_format_err),
199 accum_ofdm->mh_format_err,
200 delta_ofdm->mh_format_err,
201 max_ofdm->mh_format_err);
202 pos += scnprintf(buf + pos, bufsz - pos,
203 fmt_table, "re_acq_main_rssi_sum:",
204 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
205 accum_ofdm->re_acq_main_rssi_sum,
206 delta_ofdm->re_acq_main_rssi_sum,
207 max_ofdm->re_acq_main_rssi_sum);
208
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_header, "Statistics_Rx - CCK:");
211 pos += scnprintf(buf + pos, bufsz - pos,
212 fmt_table, "ina_cnt:",
213 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
214 delta_cck->ina_cnt, max_cck->ina_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "fina_cnt:",
217 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
218 delta_cck->fina_cnt, max_cck->fina_cnt);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 fmt_table, "plcp_err:",
221 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
222 delta_cck->plcp_err, max_cck->plcp_err);
223 pos += scnprintf(buf + pos, bufsz - pos,
224 fmt_table, "crc32_err:",
225 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
226 delta_cck->crc32_err, max_cck->crc32_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "overrun_err:",
229 le32_to_cpu(cck->overrun_err),
230 accum_cck->overrun_err, delta_cck->overrun_err,
231 max_cck->overrun_err);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 fmt_table, "early_overrun_err:",
234 le32_to_cpu(cck->early_overrun_err),
235 accum_cck->early_overrun_err,
236 delta_cck->early_overrun_err,
237 max_cck->early_overrun_err);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "crc32_good:",
240 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
241 delta_cck->crc32_good, max_cck->crc32_good);
242 pos += scnprintf(buf + pos, bufsz - pos,
243 fmt_table, "false_alarm_cnt:",
244 le32_to_cpu(cck->false_alarm_cnt),
245 accum_cck->false_alarm_cnt,
246 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 fmt_table, "fina_sync_err_cnt:",
249 le32_to_cpu(cck->fina_sync_err_cnt),
250 accum_cck->fina_sync_err_cnt,
251 delta_cck->fina_sync_err_cnt,
252 max_cck->fina_sync_err_cnt);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 fmt_table, "sfd_timeout:",
255 le32_to_cpu(cck->sfd_timeout),
256 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
257 max_cck->sfd_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 fmt_table, "fina_timeout:",
260 le32_to_cpu(cck->fina_timeout),
261 accum_cck->fina_timeout, delta_cck->fina_timeout,
262 max_cck->fina_timeout);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "unresponded_rts:",
265 le32_to_cpu(cck->unresponded_rts),
266 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
267 max_cck->unresponded_rts);
268 pos += scnprintf(buf + pos, bufsz - pos,
269 fmt_table, "rxe_frame_lmt_ovrun:",
270 le32_to_cpu(cck->rxe_frame_limit_overrun),
271 accum_cck->rxe_frame_limit_overrun,
272 delta_cck->rxe_frame_limit_overrun,
273 max_cck->rxe_frame_limit_overrun);
274 pos += scnprintf(buf + pos, bufsz - pos,
275 fmt_table, "sent_ack_cnt:",
276 le32_to_cpu(cck->sent_ack_cnt),
277 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
278 max_cck->sent_ack_cnt);
279 pos += scnprintf(buf + pos, bufsz - pos,
280 fmt_table, "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
283 max_cck->sent_cts_cnt);
284 pos += scnprintf(buf + pos, bufsz - pos,
285 fmt_table, "sent_ba_rsp_cnt:",
286 le32_to_cpu(cck->sent_ba_rsp_cnt),
287 accum_cck->sent_ba_rsp_cnt,
288 delta_cck->sent_ba_rsp_cnt,
289 max_cck->sent_ba_rsp_cnt);
290 pos += scnprintf(buf + pos, bufsz - pos,
291 fmt_table, "dsp_self_kill:",
292 le32_to_cpu(cck->dsp_self_kill),
293 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
294 max_cck->dsp_self_kill);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 fmt_table, "mh_format_err:",
297 le32_to_cpu(cck->mh_format_err),
298 accum_cck->mh_format_err, delta_cck->mh_format_err,
299 max_cck->mh_format_err);
300 pos += scnprintf(buf + pos, bufsz - pos,
301 fmt_table, "re_acq_main_rssi_sum:",
302 le32_to_cpu(cck->re_acq_main_rssi_sum),
303 accum_cck->re_acq_main_rssi_sum,
304 delta_cck->re_acq_main_rssi_sum,
305 max_cck->re_acq_main_rssi_sum);
306
307 pos += scnprintf(buf + pos, bufsz - pos,
308 fmt_header, "Statistics_Rx - GENERAL:");
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "bogus_cts:",
311 le32_to_cpu(general->bogus_cts),
312 accum_general->bogus_cts, delta_general->bogus_cts,
313 max_general->bogus_cts);
314 pos += scnprintf(buf + pos, bufsz - pos,
315 fmt_table, "bogus_ack:",
316 le32_to_cpu(general->bogus_ack),
317 accum_general->bogus_ack, delta_general->bogus_ack,
318 max_general->bogus_ack);
319 pos += scnprintf(buf + pos, bufsz - pos,
320 fmt_table, "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "filtered_frames:",
327 le32_to_cpu(general->filtered_frames),
328 accum_general->filtered_frames,
329 delta_general->filtered_frames,
330 max_general->filtered_frames);
331 pos += scnprintf(buf + pos, bufsz - pos,
332 fmt_table, "non_channel_beacons:",
333 le32_to_cpu(general->non_channel_beacons),
334 accum_general->non_channel_beacons,
335 delta_general->non_channel_beacons,
336 max_general->non_channel_beacons);
337 pos += scnprintf(buf + pos, bufsz - pos,
338 fmt_table, "channel_beacons:",
339 le32_to_cpu(general->channel_beacons),
340 accum_general->channel_beacons,
341 delta_general->channel_beacons,
342 max_general->channel_beacons);
343 pos += scnprintf(buf + pos, bufsz - pos,
344 fmt_table, "num_missed_bcon:",
345 le32_to_cpu(general->num_missed_bcon),
346 accum_general->num_missed_bcon,
347 delta_general->num_missed_bcon,
348 max_general->num_missed_bcon);
349 pos += scnprintf(buf + pos, bufsz - pos,
350 fmt_table, "adc_rx_saturation_time:",
351 le32_to_cpu(general->adc_rx_saturation_time),
352 accum_general->adc_rx_saturation_time,
353 delta_general->adc_rx_saturation_time,
354 max_general->adc_rx_saturation_time);
355 pos += scnprintf(buf + pos, bufsz - pos,
356 fmt_table, "ina_detect_search_tm:",
357 le32_to_cpu(general->ina_detection_search_time),
358 accum_general->ina_detection_search_time,
359 delta_general->ina_detection_search_time,
360 max_general->ina_detection_search_time);
361 pos += scnprintf(buf + pos, bufsz - pos,
362 fmt_table, "beacon_silence_rssi_a:",
363 le32_to_cpu(general->beacon_silence_rssi_a),
364 accum_general->beacon_silence_rssi_a,
365 delta_general->beacon_silence_rssi_a,
366 max_general->beacon_silence_rssi_a);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 fmt_table, "beacon_silence_rssi_b:",
369 le32_to_cpu(general->beacon_silence_rssi_b),
370 accum_general->beacon_silence_rssi_b,
371 delta_general->beacon_silence_rssi_b,
372 max_general->beacon_silence_rssi_b);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 fmt_table, "beacon_silence_rssi_c:",
375 le32_to_cpu(general->beacon_silence_rssi_c),
376 accum_general->beacon_silence_rssi_c,
377 delta_general->beacon_silence_rssi_c,
378 max_general->beacon_silence_rssi_c);
379 pos += scnprintf(buf + pos, bufsz - pos,
380 fmt_table, "interference_data_flag:",
381 le32_to_cpu(general->interference_data_flag),
382 accum_general->interference_data_flag,
383 delta_general->interference_data_flag,
384 max_general->interference_data_flag);
385 pos += scnprintf(buf + pos, bufsz - pos,
386 fmt_table, "channel_load:",
387 le32_to_cpu(general->channel_load),
388 accum_general->channel_load,
389 delta_general->channel_load,
390 max_general->channel_load);
391 pos += scnprintf(buf + pos, bufsz - pos,
392 fmt_table, "dsp_false_alarms:",
393 le32_to_cpu(general->dsp_false_alarms),
394 accum_general->dsp_false_alarms,
395 delta_general->dsp_false_alarms,
396 max_general->dsp_false_alarms);
397 pos += scnprintf(buf + pos, bufsz - pos,
398 fmt_table, "beacon_rssi_a:",
399 le32_to_cpu(general->beacon_rssi_a),
400 accum_general->beacon_rssi_a,
401 delta_general->beacon_rssi_a,
402 max_general->beacon_rssi_a);
403 pos += scnprintf(buf + pos, bufsz - pos,
404 fmt_table, "beacon_rssi_b:",
405 le32_to_cpu(general->beacon_rssi_b),
406 accum_general->beacon_rssi_b,
407 delta_general->beacon_rssi_b,
408 max_general->beacon_rssi_b);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 fmt_table, "beacon_rssi_c:",
411 le32_to_cpu(general->beacon_rssi_c),
412 accum_general->beacon_rssi_c,
413 delta_general->beacon_rssi_c,
414 max_general->beacon_rssi_c);
415 pos += scnprintf(buf + pos, bufsz - pos,
416 fmt_table, "beacon_energy_a:",
417 le32_to_cpu(general->beacon_energy_a),
418 accum_general->beacon_energy_a,
419 delta_general->beacon_energy_a,
420 max_general->beacon_energy_a);
421 pos += scnprintf(buf + pos, bufsz - pos,
422 fmt_table, "beacon_energy_b:",
423 le32_to_cpu(general->beacon_energy_b),
424 accum_general->beacon_energy_b,
425 delta_general->beacon_energy_b,
426 max_general->beacon_energy_b);
427 pos += scnprintf(buf + pos, bufsz - pos,
428 fmt_table, "beacon_energy_c:",
429 le32_to_cpu(general->beacon_energy_c),
430 accum_general->beacon_energy_c,
431 delta_general->beacon_energy_c,
432 max_general->beacon_energy_c);
433
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_header, "Statistics_Rx - OFDM_HT:");
436 pos += scnprintf(buf + pos, bufsz - pos,
437 fmt_table, "plcp_err:",
438 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
439 delta_ht->plcp_err, max_ht->plcp_err);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "overrun_err:",
442 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
443 delta_ht->overrun_err, max_ht->overrun_err);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 fmt_table, "early_overrun_err:",
446 le32_to_cpu(ht->early_overrun_err),
447 accum_ht->early_overrun_err,
448 delta_ht->early_overrun_err,
449 max_ht->early_overrun_err);
450 pos += scnprintf(buf + pos, bufsz - pos,
451 fmt_table, "crc32_good:",
452 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
453 delta_ht->crc32_good, max_ht->crc32_good);
454 pos += scnprintf(buf + pos, bufsz - pos,
455 fmt_table, "crc32_err:",
456 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
457 delta_ht->crc32_err, max_ht->crc32_err);
458 pos += scnprintf(buf + pos, bufsz - pos,
459 fmt_table, "mh_format_err:",
460 le32_to_cpu(ht->mh_format_err),
461 accum_ht->mh_format_err,
462 delta_ht->mh_format_err, max_ht->mh_format_err);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "agg_crc32_good:",
465 le32_to_cpu(ht->agg_crc32_good),
466 accum_ht->agg_crc32_good,
467 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
468 pos += scnprintf(buf + pos, bufsz - pos,
469 fmt_table, "agg_mpdu_cnt:",
470 le32_to_cpu(ht->agg_mpdu_cnt),
471 accum_ht->agg_mpdu_cnt,
472 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 fmt_table, "agg_cnt:",
475 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
476 delta_ht->agg_cnt, max_ht->agg_cnt);
477 pos += scnprintf(buf + pos, bufsz - pos,
478 fmt_table, "unsupport_mcs:",
479 le32_to_cpu(ht->unsupport_mcs),
480 accum_ht->unsupport_mcs,
481 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
482
483 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
484 kfree(buf);
485 return ret;
486}
487
488ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
489 char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct iwl_priv *priv = file->private_data;
493 int pos = 0;
494 char *buf;
495 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
496 ssize_t ret;
497 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
498
499 if (!iwl_legacy_is_alive(priv))
500 return -EAGAIN;
501
502 buf = kzalloc(bufsz, GFP_KERNEL);
503 if (!buf) {
504 IWL_ERR(priv, "Can not allocate Buffer\n");
505 return -ENOMEM;
506 }
507
508 /* the statistic information display here is based on
509 * the last statistics notification from uCode
510 * might not reflect the current uCode activity
511 */
512 tx = &priv->_4965.statistics.tx;
513 accum_tx = &priv->_4965.accum_statistics.tx;
514 delta_tx = &priv->_4965.delta_statistics.tx;
515 max_tx = &priv->_4965.max_delta.tx;
516
517 pos += iwl4965_statistics_flag(priv, buf, bufsz);
518 pos += scnprintf(buf + pos, bufsz - pos,
519 fmt_header, "Statistics_Tx:");
520 pos += scnprintf(buf + pos, bufsz - pos,
521 fmt_table, "preamble:",
522 le32_to_cpu(tx->preamble_cnt),
523 accum_tx->preamble_cnt,
524 delta_tx->preamble_cnt, max_tx->preamble_cnt);
525 pos += scnprintf(buf + pos, bufsz - pos,
526 fmt_table, "rx_detected_cnt:",
527 le32_to_cpu(tx->rx_detected_cnt),
528 accum_tx->rx_detected_cnt,
529 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
530 pos += scnprintf(buf + pos, bufsz - pos,
531 fmt_table, "bt_prio_defer_cnt:",
532 le32_to_cpu(tx->bt_prio_defer_cnt),
533 accum_tx->bt_prio_defer_cnt,
534 delta_tx->bt_prio_defer_cnt,
535 max_tx->bt_prio_defer_cnt);
536 pos += scnprintf(buf + pos, bufsz - pos,
537 fmt_table, "bt_prio_kill_cnt:",
538 le32_to_cpu(tx->bt_prio_kill_cnt),
539 accum_tx->bt_prio_kill_cnt,
540 delta_tx->bt_prio_kill_cnt,
541 max_tx->bt_prio_kill_cnt);
542 pos += scnprintf(buf + pos, bufsz - pos,
543 fmt_table, "few_bytes_cnt:",
544 le32_to_cpu(tx->few_bytes_cnt),
545 accum_tx->few_bytes_cnt,
546 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
547 pos += scnprintf(buf + pos, bufsz - pos,
548 fmt_table, "cts_timeout:",
549 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
550 delta_tx->cts_timeout, max_tx->cts_timeout);
551 pos += scnprintf(buf + pos, bufsz - pos,
552 fmt_table, "ack_timeout:",
553 le32_to_cpu(tx->ack_timeout),
554 accum_tx->ack_timeout,
555 delta_tx->ack_timeout, max_tx->ack_timeout);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 fmt_table, "expected_ack_cnt:",
558 le32_to_cpu(tx->expected_ack_cnt),
559 accum_tx->expected_ack_cnt,
560 delta_tx->expected_ack_cnt,
561 max_tx->expected_ack_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "actual_ack_cnt:",
564 le32_to_cpu(tx->actual_ack_cnt),
565 accum_tx->actual_ack_cnt,
566 delta_tx->actual_ack_cnt,
567 max_tx->actual_ack_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "dump_msdu_cnt:",
570 le32_to_cpu(tx->dump_msdu_cnt),
571 accum_tx->dump_msdu_cnt,
572 delta_tx->dump_msdu_cnt,
573 max_tx->dump_msdu_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "abort_nxt_frame_mismatch:",
576 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
577 accum_tx->burst_abort_next_frame_mismatch_cnt,
578 delta_tx->burst_abort_next_frame_mismatch_cnt,
579 max_tx->burst_abort_next_frame_mismatch_cnt);
580 pos += scnprintf(buf + pos, bufsz - pos,
581 fmt_table, "abort_missing_nxt_frame:",
582 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
583 accum_tx->burst_abort_missing_next_frame_cnt,
584 delta_tx->burst_abort_missing_next_frame_cnt,
585 max_tx->burst_abort_missing_next_frame_cnt);
586 pos += scnprintf(buf + pos, bufsz - pos,
587 fmt_table, "cts_timeout_collision:",
588 le32_to_cpu(tx->cts_timeout_collision),
589 accum_tx->cts_timeout_collision,
590 delta_tx->cts_timeout_collision,
591 max_tx->cts_timeout_collision);
592 pos += scnprintf(buf + pos, bufsz - pos,
593 fmt_table, "ack_ba_timeout_collision:",
594 le32_to_cpu(tx->ack_or_ba_timeout_collision),
595 accum_tx->ack_or_ba_timeout_collision,
596 delta_tx->ack_or_ba_timeout_collision,
597 max_tx->ack_or_ba_timeout_collision);
598 pos += scnprintf(buf + pos, bufsz - pos,
599 fmt_table, "agg ba_timeout:",
600 le32_to_cpu(tx->agg.ba_timeout),
601 accum_tx->agg.ba_timeout,
602 delta_tx->agg.ba_timeout,
603 max_tx->agg.ba_timeout);
604 pos += scnprintf(buf + pos, bufsz - pos,
605 fmt_table, "agg ba_resched_frames:",
606 le32_to_cpu(tx->agg.ba_reschedule_frames),
607 accum_tx->agg.ba_reschedule_frames,
608 delta_tx->agg.ba_reschedule_frames,
609 max_tx->agg.ba_reschedule_frames);
610 pos += scnprintf(buf + pos, bufsz - pos,
611 fmt_table, "agg scd_query_agg_frame:",
612 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
613 accum_tx->agg.scd_query_agg_frame_cnt,
614 delta_tx->agg.scd_query_agg_frame_cnt,
615 max_tx->agg.scd_query_agg_frame_cnt);
616 pos += scnprintf(buf + pos, bufsz - pos,
617 fmt_table, "agg scd_query_no_agg:",
618 le32_to_cpu(tx->agg.scd_query_no_agg),
619 accum_tx->agg.scd_query_no_agg,
620 delta_tx->agg.scd_query_no_agg,
621 max_tx->agg.scd_query_no_agg);
622 pos += scnprintf(buf + pos, bufsz - pos,
623 fmt_table, "agg scd_query_agg:",
624 le32_to_cpu(tx->agg.scd_query_agg),
625 accum_tx->agg.scd_query_agg,
626 delta_tx->agg.scd_query_agg,
627 max_tx->agg.scd_query_agg);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 fmt_table, "agg scd_query_mismatch:",
630 le32_to_cpu(tx->agg.scd_query_mismatch),
631 accum_tx->agg.scd_query_mismatch,
632 delta_tx->agg.scd_query_mismatch,
633 max_tx->agg.scd_query_mismatch);
634 pos += scnprintf(buf + pos, bufsz - pos,
635 fmt_table, "agg frame_not_ready:",
636 le32_to_cpu(tx->agg.frame_not_ready),
637 accum_tx->agg.frame_not_ready,
638 delta_tx->agg.frame_not_ready,
639 max_tx->agg.frame_not_ready);
640 pos += scnprintf(buf + pos, bufsz - pos,
641 fmt_table, "agg underrun:",
642 le32_to_cpu(tx->agg.underrun),
643 accum_tx->agg.underrun,
644 delta_tx->agg.underrun, max_tx->agg.underrun);
645 pos += scnprintf(buf + pos, bufsz - pos,
646 fmt_table, "agg bt_prio_kill:",
647 le32_to_cpu(tx->agg.bt_prio_kill),
648 accum_tx->agg.bt_prio_kill,
649 delta_tx->agg.bt_prio_kill,
650 max_tx->agg.bt_prio_kill);
651 pos += scnprintf(buf + pos, bufsz - pos,
652 fmt_table, "agg rx_ba_rsp_cnt:",
653 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
654 accum_tx->agg.rx_ba_rsp_cnt,
655 delta_tx->agg.rx_ba_rsp_cnt,
656 max_tx->agg.rx_ba_rsp_cnt);
657
658 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
659 kfree(buf);
660 return ret;
661}
662
663ssize_t
664iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
665 size_t count, loff_t *ppos)
666{
667 struct iwl_priv *priv = file->private_data;
668 int pos = 0;
669 char *buf;
670 int bufsz = sizeof(struct statistics_general) * 10 + 300;
671 ssize_t ret;
672 struct statistics_general_common *general, *accum_general;
673 struct statistics_general_common *delta_general, *max_general;
674 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
675 struct statistics_div *div, *accum_div, *delta_div, *max_div;
676
677 if (!iwl_legacy_is_alive(priv))
678 return -EAGAIN;
679
680 buf = kzalloc(bufsz, GFP_KERNEL);
681 if (!buf) {
682 IWL_ERR(priv, "Can not allocate Buffer\n");
683 return -ENOMEM;
684 }
685
686 /* the statistic information display here is based on
687 * the last statistics notification from uCode
688 * might not reflect the current uCode activity
689 */
690 general = &priv->_4965.statistics.general.common;
691 dbg = &priv->_4965.statistics.general.common.dbg;
692 div = &priv->_4965.statistics.general.common.div;
693 accum_general = &priv->_4965.accum_statistics.general.common;
694 accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
695 accum_div = &priv->_4965.accum_statistics.general.common.div;
696 delta_general = &priv->_4965.delta_statistics.general.common;
697 max_general = &priv->_4965.max_delta.general.common;
698 delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
699 max_dbg = &priv->_4965.max_delta.general.common.dbg;
700 delta_div = &priv->_4965.delta_statistics.general.common.div;
701 max_div = &priv->_4965.max_delta.general.common.div;
702
703 pos += iwl4965_statistics_flag(priv, buf, bufsz);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 fmt_header, "Statistics_General:");
706 pos += scnprintf(buf + pos, bufsz - pos,
707 fmt_value, "temperature:",
708 le32_to_cpu(general->temperature));
709 pos += scnprintf(buf + pos, bufsz - pos,
710 fmt_value, "ttl_timestamp:",
711 le32_to_cpu(general->ttl_timestamp));
712 pos += scnprintf(buf + pos, bufsz - pos,
713 fmt_table, "burst_check:",
714 le32_to_cpu(dbg->burst_check),
715 accum_dbg->burst_check,
716 delta_dbg->burst_check, max_dbg->burst_check);
717 pos += scnprintf(buf + pos, bufsz - pos,
718 fmt_table, "burst_count:",
719 le32_to_cpu(dbg->burst_count),
720 accum_dbg->burst_count,
721 delta_dbg->burst_count, max_dbg->burst_count);
722 pos += scnprintf(buf + pos, bufsz - pos,
723 fmt_table, "wait_for_silence_timeout_count:",
724 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
725 accum_dbg->wait_for_silence_timeout_cnt,
726 delta_dbg->wait_for_silence_timeout_cnt,
727 max_dbg->wait_for_silence_timeout_cnt);
728 pos += scnprintf(buf + pos, bufsz - pos,
729 fmt_table, "sleep_time:",
730 le32_to_cpu(general->sleep_time),
731 accum_general->sleep_time,
732 delta_general->sleep_time, max_general->sleep_time);
733 pos += scnprintf(buf + pos, bufsz - pos,
734 fmt_table, "slots_out:",
735 le32_to_cpu(general->slots_out),
736 accum_general->slots_out,
737 delta_general->slots_out, max_general->slots_out);
738 pos += scnprintf(buf + pos, bufsz - pos,
739 fmt_table, "slots_idle:",
740 le32_to_cpu(general->slots_idle),
741 accum_general->slots_idle,
742 delta_general->slots_idle, max_general->slots_idle);
743 pos += scnprintf(buf + pos, bufsz - pos,
744 fmt_table, "tx_on_a:",
745 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
746 delta_div->tx_on_a, max_div->tx_on_a);
747 pos += scnprintf(buf + pos, bufsz - pos,
748 fmt_table, "tx_on_b:",
749 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
750 delta_div->tx_on_b, max_div->tx_on_b);
751 pos += scnprintf(buf + pos, bufsz - pos,
752 fmt_table, "exec_time:",
753 le32_to_cpu(div->exec_time), accum_div->exec_time,
754 delta_div->exec_time, max_div->exec_time);
755 pos += scnprintf(buf + pos, bufsz - pos,
756 fmt_table, "probe_time:",
757 le32_to_cpu(div->probe_time), accum_div->probe_time,
758 delta_div->probe_time, max_div->probe_time);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_table, "rx_enable_counter:",
761 le32_to_cpu(general->rx_enable_counter),
762 accum_general->rx_enable_counter,
763 delta_general->rx_enable_counter,
764 max_general->rx_enable_counter);
765 pos += scnprintf(buf + pos, bufsz - pos,
766 fmt_table, "num_of_sos_states:",
767 le32_to_cpu(general->num_of_sos_states),
768 accum_general->num_of_sos_states,
769 delta_general->num_of_sos_states,
770 max_general->num_of_sos_states);
771 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
772 kfree(buf);
773 return ret;
774}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644
index 000000000000..6c8e35361a9e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
@@ -0,0 +1,59 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl4965_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count, loff_t *ppos);
40#else
41static ssize_t
42iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos)
44{
45 return 0;
46}
47static ssize_t
48iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
49 size_t count, loff_t *ppos)
50{
51 return 0;
52}
53static ssize_t
54iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 return 0;
58}
59#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644
index 000000000000..cb9baab1ff7d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
@@ -0,0 +1,154 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-4965.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
/*
 * The device's EEPROM semaphore prevents conflicts between driver and uCode
 * when accessing the EEPROM; each access is a series of pulses to/from the
 * EEPROM chip, not a single event, so even reads could conflict if they
 * weren't arbitrated by the semaphore.
 *
 * Returns the (non-negative) iwl_poll_bit() result once the semaphore bit is
 * observed set, or the last poll error after EEPROM_SEM_RETRY_LIMIT attempts.
 * NOTE(review): if EEPROM_SEM_RETRY_LIMIT were ever 0 the loop would not run
 * and 'ret' would be returned uninitialized — assumed to be a nonzero
 * constant; confirm in the CSR/EEPROM header.
 */
int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
	u16 count;
	int ret;

	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
		/* Request semaphore */
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

		/* See if we got it */
		ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				EEPROM_SEM_TIMEOUT);
		if (ret >= 0) {
			IWL_DEBUG_IO(priv,
				"Acquired semaphore after %d tries.\n",
				count+1);
			return ret;
		}
	}

	return ret;
}
115
/*
 * Release the EEPROM semaphore acquired by
 * iwl4965_eeprom_acquire_semaphore() by clearing the ownership bit in the
 * hardware-interface config register.
 */
void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
{
	iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

}
122
123int iwl4965_eeprom_check_version(struct iwl_priv *priv)
124{
125 u16 eeprom_ver;
126 u16 calib_ver;
127
128 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
129 calib_ver = iwl_legacy_eeprom_query16(priv,
130 EEPROM_4965_CALIB_VERSION_OFFSET);
131
132 if (eeprom_ver < priv->cfg->eeprom_ver ||
133 calib_ver < priv->cfg->eeprom_calib_ver)
134 goto err;
135
136 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
137 eeprom_ver, calib_ver);
138
139 return 0;
140err:
141 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
142 "CALIB=0x%x < 0x%x\n",
143 eeprom_ver, priv->cfg->eeprom_ver,
144 calib_ver, priv->cfg->eeprom_calib_ver);
145 return -EINVAL;
146
147}
148
149void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
150{
151 const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
152 EEPROM_MAC_ADDRESS);
153 memcpy(mac, addr, ETH_ALEN);
154}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 9166794eda0d..08b189c8472d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -789,4 +789,26 @@ struct iwl4965_scd_bc_tbl {
789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; 789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
790} __packed; 790} __packed;
791 791
792
793#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
794
795/* RSSI to dBm */
796#define IWL4965_RSSI_OFFSET 44
797
798/* PCI registers */
799#define PCI_CFG_RETRY_TIMEOUT 0x041
800
801/* PCI register values */
802#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
803#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
804
805#define IWL4965_DEFAULT_TX_RETRY 15
806
807/* Limit range of txpower output target to be between these values */
808#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
809
810/* EEPROM */
811#define IWL4965_FIRST_AMPDU_QUEUE 10
812
813
792#endif /* !__iwl_4965_hw_h__ */ 814#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644
index 000000000000..26d324e30692
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
@@ -0,0 +1,74 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-4965-led.h"
45
46/* Send led command */
47static int
48iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
49{
50 struct iwl_host_cmd cmd = {
51 .id = REPLY_LEDS_CMD,
52 .len = sizeof(struct iwl_led_cmd),
53 .data = led_cmd,
54 .flags = CMD_ASYNC,
55 .callback = NULL,
56 };
57 u32 reg;
58
59 reg = iwl_read32(priv, CSR_LED_REG);
60 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
61 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
62
63 return iwl_legacy_send_cmd(priv, &cmd);
64}
65
66/* Set led register off */
67void iwl4965_led_enable(struct iwl_priv *priv)
68{
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70}
71
72const struct iwl_led_ops iwl4965_led_ops = {
73 .cmd = iwl4965_send_led_cmd,
74};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
new file mode 100644
index 000000000000..5ed3615fc338
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
@@ -0,0 +1,33 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_4965_led_h__
28#define __iwl_4965_led_h__
29
30extern const struct iwl_led_ops iwl4965_led_ops;
31void iwl4965_led_enable(struct iwl_priv *priv);
32
33#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644
index 000000000000..5a8a3cce27bc
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -0,0 +1,1260 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-sta.h"
42
43void iwl4965_check_abort_status(struct iwl_priv *priv,
44 u8 frame_count, u32 status)
45{
46 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
47 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
48 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
49 queue_work(priv->workqueue, &priv->tx_flush);
50 }
51}
52
/*
 * EEPROM
 */
/*
 * Default module parameters for the 4965.
 * amsdu_size_8K = 1 selects 8K (instead of 4K) Rx buffers in
 * iwl4965_rx_init(); restart_fw = 1 presumably enables firmware restart on
 * error — confirm against the iwl_mod_params definition.
 */
struct iwl_mod_params iwl4965_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
61
/*
 * Reset an Rx queue to its "empty" state: unmap/free any pages still held
 * in the buffer pool, place every pool entry on the rx_used list, clear the
 * RBD queue slots, and zero the read/write indices. Runs entirely under the
 * queue's spinlock.
 */
void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Detach all buffers from the RBD circular buffer slots */
	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
93
/*
 * Program the device's Rx DMA engine for the given Rx queue:
 * stop the DMA channel, point the hardware at the RBD circular buffer and
 * the Rx status area in DRAM, then re-enable DMA with the configured buffer
 * size (4K or 8K, per the amsdu_size_8K module parameter).
 *
 * Always returns 0.
 */
int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0;

	/* Buffer size follows the amsdu_size_8K module parameter */
	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size|
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
138
/*
 * Select V_MAIN as the device's power source via the APMG power-control
 * register. The commented-out snippet below documents how V_AUX would be
 * selected instead (when PME from D3cold is available).
 */
static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

	if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
		iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			~APMG_PS_CTRL_MSK_PWR_SRC);
}
155
/*
 * One-time (or post-reset) NIC initialization:
 *  - run the config-specific APM init/config hooks,
 *  - select the main power source,
 *  - allocate (or reset) and program the Rx queue, replenish its buffers,
 *  - allocate (or reset) the Tx and command queues,
 * then mark the driver STATUS_INIT.
 *
 * Returns 0 on success, -ENOMEM if the Rx queue cannot be allocated, or the
 * error from Tx queue allocation. The ordering of the steps below follows
 * the hardware bring-up sequence and should not be rearranged.
 */
int iwl4965_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_set_pwr_vmain(priv);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_legacy_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl4965_rx_queue_reset(priv, rxq);

	iwl4965_rx_replenish(priv);

	iwl4965_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	/* Push the initial write pointer to the device */
	rxq->need_update = 1;
	iwl_legacy_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!priv->txq) {
		ret = iwl4965_txq_ctx_alloc(priv);
		if (ret)
			return ret;
	} else
		iwl4965_txq_ctx_reset(priv);

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
208
209/**
210 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
211 */
212static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
213 dma_addr_t dma_addr)
214{
215 return cpu_to_le32((u32)(dma_addr >> 8));
216}
217
/**
 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwl4965_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
							rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
	}
}
270
/**
 * iwl4965_rx_allocate - allocate pages for all used rx buffers
 *
 * Moves each entry of rx_used to rx_free after allocating and DMA-mapping a
 * fresh page for it. Allocation happens outside the queue spinlock, so the
 * rx_used list is re-checked after each allocation. (The original comment
 * here was titled "iwl4965_rx_replenish" — that is the caller, below.)
 *
 * @priority: GFP flags for the page allocation (GFP_KERNEL from process
 * context, GFP_ATOMIC from the Rx path).
 */
static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* Stop once no used buffer remains to be refilled */
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Suppress allocation-failure warnings while the pool is
		 * still comfortably stocked */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv,
					"Failed to alloc_pages with %s. "
					"Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
						 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* rx_used may have drained while we allocated unlocked;
		 * if so, give the page back */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
357
/*
 * Replenish Rx buffers from process context: allocate pages for all used
 * buffers (may sleep, GFP_KERNEL), then restock the hardware queue under
 * the device lock.
 */
void iwl4965_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl4965_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwl4965_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
368
/*
 * Replenish Rx buffers from atomic context (GFP_ATOMIC, no sleeping).
 * Unlike iwl4965_rx_replenish(), restocking is done without taking
 * priv->lock — presumably because the callers already run in the Rx path;
 * confirm against the call sites.
 */
void iwl4965_rx_replenish_now(struct iwl_priv *priv)
{
	iwl4965_rx_allocate(priv, GFP_ATOMIC);

	iwl4965_rx_queue_restock(priv);
}
375
/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the POOL needs to have its page set to NULL.
 * This free routine walks the list of POOL entries and if the page is set to
 * non NULL it is unmapped and freed, then the RBD ring and the Rx status
 * block are released back to the DMA coherent allocator.
 * (The original comment said "SKB"; this driver tracks pages.)
 */
void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* Free the RBD circular buffer (4 bytes per RBD) and Rx status area */
	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts  = NULL;
}
401
/*
 * Stop the Rx DMA channel and wait (up to 1000 polls) for it to report
 * idle. NOTE(review): the iwl_poll_direct_bit() result is discarded, so a
 * timeout is silently ignored and the function always returns 0.
 */
int iwl4965_rxq_stop(struct iwl_priv *priv)
{

	/* stop Rx DMA */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
412
413int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
414{
415 int idx = 0;
416 int band_offset = 0;
417
418 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
419 if (rate_n_flags & RATE_MCS_HT_MSK) {
420 idx = (rate_n_flags & 0xff);
421 return idx;
422 /* Legacy rate format, search for match in table */
423 } else {
424 if (band == IEEE80211_BAND_5GHZ)
425 band_offset = IWL_FIRST_OFDM_RATE;
426 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
427 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
428 return idx - band_offset;
429 }
430
431 return -1;
432}
433
434static int iwl4965_calc_rssi(struct iwl_priv *priv,
435 struct iwl_rx_phy_res *rx_resp)
436{
437 /* data from PHY/DSP regarding signal strength, etc.,
438 * contents are always there, not configurable by host. */
439 struct iwl4965_rx_non_cfg_phy *ncphy =
440 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
441 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
442 >> IWL49_AGC_DB_POS;
443
444 u32 valid_antennae =
445 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
446 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
447 u8 max_rssi = 0;
448 u32 i;
449
450 /* Find max rssi among 3 possible receivers.
451 * These values are measured by the digital signal processor (DSP).
452 * They should stay fairly constant even as the signal strength varies,
453 * if the radio's automatic gain control (AGC) is working right.
454 * AGC value (see below) will provide the "interesting" info. */
455 for (i = 0; i < 3; i++)
456 if (valid_antennae & (1 << i))
457 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
458
459 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
460 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
461 max_rssi, agc);
462
463 /* dBm = max_rssi dB - agc dB - constant.
464 * Higher AGC (higher radio gain) means lower signal. */
465 return max_rssi - agc - IWL4965_RSSI_OFFSET;
466}
467
468
/*
 * Translate the raw hardware decryption status word (decrypt_in) from a
 * REPLY_RX_MPDU_CMD into the normalized RX_RES_STATUS_* form used by the
 * rest of the Rx path. Note the deliberate switch fallthrough: a TKIP frame
 * with a good TTAK falls into the default ICV/MIC check.
 */
static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
					RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
					decrypt_in, decrypt_out);

	return decrypt_out;
}
527
/*
 * Hand a received frame up to mac80211. Drops the frame when the interface
 * is closed or when hardware crypto reports a bad decryption; otherwise
 * attaches the Rx page as an skb fragment (zero copy of the payload),
 * copies the rx status into the skb control block, and passes ownership of
 * both skb and page to mac80211.
 */
static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	/* Attach the payload page as a fragment instead of copying it */
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_legacy_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	/* The page now belongs to the skb/mac80211; drop our accounting */
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
565
566/* Called for REPLY_RX (legacy ABG frames), or
567 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
568void iwl4965_rx_reply_rx(struct iwl_priv *priv,
569 struct iwl_rx_mem_buffer *rxb)
570{
571 struct ieee80211_hdr *header;
572 struct ieee80211_rx_status rx_status;
573 struct iwl_rx_packet *pkt = rxb_addr(rxb);
574 struct iwl_rx_phy_res *phy_res;
575 __le32 rx_pkt_status;
576 struct iwl_rx_mpdu_res_start *amsdu;
577 u32 len;
578 u32 ampdu_status;
579 u32 rate_n_flags;
580
581 /**
582 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
583 * REPLY_RX: physical layer info is in this buffer
584 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
585 * command and cached in priv->last_phy_res
586 *
587 * Here we set up local variables depending on which command is
588 * received.
589 */
590 if (pkt->hdr.cmd == REPLY_RX) {
591 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
592 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
593 + phy_res->cfg_phy_cnt);
594
595 len = le16_to_cpu(phy_res->byte_count);
596 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
597 phy_res->cfg_phy_cnt + len);
598 ampdu_status = le32_to_cpu(rx_pkt_status);
599 } else {
600 if (!priv->_4965.last_phy_res_valid) {
601 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
602 return;
603 }
604 phy_res = &priv->_4965.last_phy_res;
605 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
606 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
607 len = le16_to_cpu(amsdu->byte_count);
608 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
609 ampdu_status = iwl4965_translate_rx_status(priv,
610 le32_to_cpu(rx_pkt_status));
611 }
612
613 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
614 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
615 phy_res->cfg_phy_cnt);
616 return;
617 }
618
619 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
620 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
621 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
622 le32_to_cpu(rx_pkt_status));
623 return;
624 }
625
626 /* This will be used in several places later */
627 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
628
629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631 rx_status.freq =
632 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
633 rx_status.band);
634 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
635 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0;
639
640 /* TSF isn't reliable. In order to allow smooth user experience,
641 * this W/A doesn't propagate it to the mac80211 */
642 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
643
644 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
645
646 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
647 rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
648
649 iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
650 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
651 rx_status.signal, (unsigned long long)rx_status.mactime);
652
653 /*
654 * "antenna number"
655 *
656 * It seems that the antenna field in the phy flags value
657 * is actually a bit field. This is undefined by radiotap,
658 * it wants an actual antenna number but I always get "7"
659 * for most legacy frames I receive indicating that the
660 * same frame was received on all three RX chains.
661 *
662 * I think this field should be removed in favor of a
663 * new 802.11n radiotap field "RX chains" that is defined
664 * as a bitmask.
665 */
666 rx_status.antenna =
667 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
668 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
669
670 /* set the preamble flag if appropriate */
671 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
672 rx_status.flag |= RX_FLAG_SHORTPRE;
673
674 /* Set up the HT phy flags */
675 if (rate_n_flags & RATE_MCS_HT_MSK)
676 rx_status.flag |= RX_FLAG_HT;
677 if (rate_n_flags & RATE_MCS_HT40_MSK)
678 rx_status.flag |= RX_FLAG_40MHZ;
679 if (rate_n_flags & RATE_MCS_SGI_MSK)
680 rx_status.flag |= RX_FLAG_SHORT_GI;
681
682 iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
683 rxb, &rx_status);
684}
685
686/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
687 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
688void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
689 struct iwl_rx_mem_buffer *rxb)
690{
691 struct iwl_rx_packet *pkt = rxb_addr(rxb);
692 priv->_4965.last_phy_res_valid = true;
693 memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
694 sizeof(struct iwl_rx_phy_res));
695}
696
697static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
698 struct ieee80211_vif *vif,
699 enum ieee80211_band band,
700 struct iwl_scan_channel *scan_ch)
701{
702 const struct ieee80211_supported_band *sband;
703 u16 passive_dwell = 0;
704 u16 active_dwell = 0;
705 int added = 0;
706 u16 channel = 0;
707
708 sband = iwl_get_hw_mode(priv, band);
709 if (!sband) {
710 IWL_ERR(priv, "invalid band\n");
711 return added;
712 }
713
714 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
715 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
716
717 if (passive_dwell <= active_dwell)
718 passive_dwell = active_dwell + 1;
719
720 channel = iwl_legacy_get_single_channel_number(priv, band);
721 if (channel) {
722 scan_ch->channel = cpu_to_le16(channel);
723 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
724 scan_ch->active_dwell = cpu_to_le16(active_dwell);
725 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
726 /* Set txpower levels to defaults */
727 scan_ch->dsp_atten = 110;
728 if (band == IEEE80211_BAND_5GHZ)
729 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
730 else
731 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
732 added++;
733 } else
734 IWL_ERR(priv, "no valid channel found\n");
735 return added;
736}
737
/*
 * iwl4965_get_channels_for_scan - fill the scan command's channel table
 *
 * Walks the mac80211 scan request (priv->scan_request) and copies every
 * channel that belongs to @band into the array at @scan_ch, selecting
 * active vs. passive scanning per channel and filling in dwell times and
 * default tx power.
 *
 * Returns the number of channel entries written.
 */
static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);

	/* passive dwell must always exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv,
				 "Channel %d is INVALID for this band.\n",
					channel);
			continue;
		}

		/* scan passively unless active scanning was requested and
		 * the channel/regulatory info permits it */
		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
			       active_dwell : passive_dwell);

		/* advance to the next slot in the command's channel array */
		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
817
/*
 * iwl4965_request_scan - build and send REPLY_SCAN_CMD to the uCode
 *
 * Allocates (once) and fills the scan command: dwell/suspend timing,
 * direct-scan SSIDs, tx rate, rx chain selection, probe request and the
 * per-band channel table, then issues the command synchronously.
 *
 * Must be called with priv->mutex held. Returns 0 on success, -ENOMEM
 * if the command buffer cannot be allocated, -EIO on an invalid band or
 * empty channel list, or the error from iwl_legacy_send_cmd_sync().
 */
int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl_scan_cmd *scan;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = priv->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (vif)
		ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	/* command buffer is allocated lazily on first scan and reused */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl_legacy_is_any_associated(priv)) {
		u16 interval = 0;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		if (priv->is_internal_short_scan)
			interval = 0;
		else
			interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* suspend_time is encoded as beacon count (high bits) plus
		 * remainder in uSec-like units (x1024) */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	if (priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
	} else if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* pick probe tx rate per band; CCK only valid on 2.4 GHz and only
	 * when the active context is not in HT40-only channel mode */
	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
					IWL_GOOD_CRC_TH_NEVER;

	band = priv->scan_band;

	if (priv->cfg->scan_rx_antennas[band])
		rx_ant = priv->cfg->scan_rx_antennas[band];

	if (priv->cfg->scan_tx_antennas[band])
		scan_tx_antennas = priv->cfg->scan_tx_antennas[band];

	/* rotate through the valid tx antennas between scans */
	priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
						priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
				priv->chain_noise_data.active_chains);

		rx_ant = iwl4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);
	/* build the probe request body right after the fixed command part */
	if (!priv->is_internal_short_scan) {
		cmd_len = iwl_legacy_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	} else {
		/* use bcast addr, will not be transmitted but must be valid */
		cmd_len = iwl_legacy_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					iwlegacy_bcast_addr, NULL, 0,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));

	}
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	/* channel table follows the probe request in scan->data */
	if (priv->is_internal_short_scan) {
		scan->channel_count =
			iwl4965_get_single_channel_for_scan(priv, vif, band,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	} else {
		scan->channel_count =
			iwl4965_get_channels_for_scan(priv, vif, band,
				is_active, n_probes,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	}
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* total command length = fixed part + probe req + channel table */
	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
		scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);

	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		clear_bit(STATUS_SCAN_HW, &priv->status);

	return ret;
}
1037
1038int iwl4965_manage_ibss_station(struct iwl_priv *priv,
1039 struct ieee80211_vif *vif, bool add)
1040{
1041 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1042
1043 if (add)
1044 return iwl4965_add_bssid_station(priv, vif_priv->ctx,
1045 vif->bss_conf.bssid,
1046 &vif_priv->ibss_bssid_sta_id);
1047 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1048 vif->bss_conf.bssid);
1049}
1050
1051void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
1052 int sta_id, int tid, int freed)
1053{
1054 lockdep_assert_held(&priv->sta_lock);
1055
1056 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1057 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1058 else {
1059 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1060 priv->stations[sta_id].tid[tid].tfds_in_queue,
1061 freed);
1062 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1063 }
1064}
1065
1066#define IWL_TX_QUEUE_MSK 0xfffff
1067
1068static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
1069{
1070 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1071 priv->current_ht_config.single_chain_sufficient;
1072}
1073
/* number of active RX chains when multi-stream (MIMO) reception is expected */
#define IWL_NUM_RX_CHAINS_MULTIPLE	3
/* number of active RX chains when a single stream is sufficient */
#define IWL_NUM_RX_CHAINS_SINGLE	2
/* number of chains kept powered while idle, per SMPS setting */
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1
1078
1079/*
1080 * Determine how many receiver/antenna chains to use.
1081 *
1082 * More provides better reception via diversity. Fewer saves power
1083 * at the expense of throughput, but only when not in powersave to
1084 * start with.
1085 *
1086 * MIMO (dual stream) requires at least 2, but works better with 3.
1087 * This does not determine *which* chains to use, just how many.
1088 */
1089static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
1090{
1091 /* # of Rx chains to use when expecting MIMO. */
1092 if (iwl4965_is_single_rx_stream(priv))
1093 return IWL_NUM_RX_CHAINS_SINGLE;
1094 else
1095 return IWL_NUM_RX_CHAINS_MULTIPLE;
1096}
1097
1098/*
1099 * When we are in power saving mode, unless device support spatial
1100 * multiplexing power save, use the active count for rx chain count.
1101 */
1102static int
1103iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1104{
1105 /* # Rx chains when idling, depending on SMPS mode */
1106 switch (priv->current_ht_config.smps) {
1107 case IEEE80211_SMPS_STATIC:
1108 case IEEE80211_SMPS_DYNAMIC:
1109 return IWL_NUM_IDLE_CHAINS_SINGLE;
1110 case IEEE80211_SMPS_OFF:
1111 return active_cnt;
1112 default:
1113 WARN(1, "invalid SMPS mode %d",
1114 priv->current_ht_config.smps);
1115 return active_cnt;
1116 }
1117}
1118
1119/* up to 4 chains */
1120static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
1121{
1122 u8 res;
1123 res = (chain_bitmap & BIT(0)) >> 0;
1124 res += (chain_bitmap & BIT(1)) >> 1;
1125 res += (chain_bitmap & BIT(2)) >> 2;
1126 res += (chain_bitmap & BIT(3)) >> 3;
1127 return res;
1128}
1129
/**
 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = iwl4965_is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl4965_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	/* force MIMO only when multiple streams are expected, enough chains
	 * are active, and we are not in powersave */
	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	/* sanity: counts must be non-zero and idle must not exceed active */
	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1187
1188u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1189{
1190 int i;
1191 u8 ind = ant;
1192
1193 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1194 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1195 if (valid & BIT(ind))
1196 return ind;
1197 }
1198 return ant;
1199}
1200
/*
 * Map an FH (flow handler) register address to its symbolic name, or
 * "UNKNOWN" for anything not listed. Each IWL_CMD expands to a case
 * label returning the stringified register name.
 */
static const char *iwl4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1217
/*
 * iwl4965_dump_fh - dump FH (flow handler) register values
 *
 * With legacy debug support compiled in and @display set, formats the
 * dump into a freshly kmalloc'ed buffer returned through @buf (caller
 * must free) and returns the number of bytes written, or -ENOMEM on
 * allocation failure. Otherwise the registers are written to the error
 * log and 0 is returned.
 */
int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	/* registers to dump, in display order */
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				" %34s: 0X%08x\n",
				iwl4965_get_fh_string(fh_tbl[i]),
				iwl_legacy_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, " %34s: 0X%08x\n",
			iwl4965_get_fh_string(fh_tbl[i]),
			iwl_legacy_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644
index 000000000000..31ac672b64e1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -0,0 +1,2870 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <linux/wireless.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-dev.h"
40#include "iwl-sta.h"
41#include "iwl-core.h"
42#include "iwl-4965.h"
43
#define IWL4965_RS_NAME "iwl-4965-rs"

/* tries at one antenna configuration before toggling to the next */
#define NUM_TRY_BEFORE_ANT_TOGGLE 1
/* retry counts per rate entry in the link quality table */
#define IWL_NUMBER_TRY      1
#define IWL_HT_NUMBER_TRY   3

#define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
#define IWL_RATE_MIN_FAILURE_TH		6	/* min failures to calc tpt */
#define IWL_RATE_MIN_SUCCESS_TH		8	/* min successes to calc tpt */

/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX		15
/* max time to accum history, 3 seconds */
#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
58
/* Map each HT rate index to a legacy (CCK/OFDM) rate index, used when
 * switching from an HT rate to a legacy one. */
static u8 rs_ht_to_legacy[] = {
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
};
68
/* Next antenna mask to try when toggling, indexed by the current ANT_*
 * mask (see the per-entry transitions below). */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A    -> */ ANT_B,
	/*ANT_B    -> */ ANT_C,
	/*ANT_AB   -> */ ANT_BC,
	/*ANT_C    -> */ ANT_A,
	/*ANT_AC   -> */ ANT_AB,
	/*ANT_BC   -> */ ANT_AC,
	/*ANT_ABC  -> */ ANT_ABC,
};
79
/* Expand one iwl_rate_info initializer: legacy/SISO/MIMO2 PLCP values,
 * the IEEE rate value, and the prev/next rate-index links used by the
 * rate-scaling state machine (see the parameter-order note below). */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }
91
/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 * The first four entries are the CCK rates (2.4 GHz only); the rest are
 * OFDM/HT rates.
 */
const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),          /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),         /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),    /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),    /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),    /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),    /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),    /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV), /* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV), /* 60mbps */
};
115
116static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
117{
118 int idx = 0;
119
120 /* HT rate format */
121 if (rate_n_flags & RATE_MCS_HT_MSK) {
122 idx = (rate_n_flags & 0xff);
123
124 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
125 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
126
127 idx += IWL_FIRST_OFDM_RATE;
128 /* skip 9M not supported in ht*/
129 if (idx >= IWL_RATE_9M_INDEX)
130 idx += 1;
131 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
132 return idx;
133
134 /* legacy rate format, search for match in table */
135 } else {
136 for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
137 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
138 return idx;
139 }
140
141 return -1;
142}
143
/* Forward declarations for the rate-scale state machine defined below. */
static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
				   struct sk_buff *skb,
				   struct ieee80211_sta *sta,
				   struct iwl_lq_sta *lq_sta);
static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
			     struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
					bool force_search);

#ifdef CONFIG_MAC80211_DEBUGFS
static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
			     u32 *rate_n_flags, int index);
#else
/* no-op stub when mac80211 debugfs support is compiled out */
static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
			     u32 *rate_n_flags, int index)
{}
#endif
161
/**
 * The following tables contain the expected throughput metrics for all rates
 *
 *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
 *
 * where invalid entries are zeros.
 *
 * CCK rates are only valid in legacy table and will only be used in G
 * (2.4 GHz) band.
 *
 * In the HT tables below, the four rows are:
 * plain, short guard interval (SGI), aggregation (AGG), and AGG+SGI.
 */

static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};

static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
	{0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
	{0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
};

static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
	{0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
};

static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
	{0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
};

static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
};

/* mbps, mcs -- human-readable rate / modulation strings, indexed like
 * iwlegacy_rates (used for debug output) */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
	{  "1", "BPSK DSSS"},
	{  "2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{ "11", "QPSK CCK"},
	{  "6", "BPSK 1/2"},
	{  "9", "BPSK 1/2"},
	{ "12", "QPSK 1/2"},
	{ "18", "QPSK 3/4"},
	{ "24", "16QAM 1/2"},
	{ "36", "16QAM 3/4"},
	{ "48", "64QAM 2/3"},
	{ "54", "64QAM 3/4"},
	{ "60", "64QAM 5/6"},
};

/* number of MCS indices per spatial stream */
#define MCS_INDEX_PER_STREAM	(8)
223
224static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
225{
226 return (u8)(rate_n_flags & 0xFF);
227}
228
229static void
230iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
231{
232 window->data = 0;
233 window->success_counter = 0;
234 window->success_ratio = IWL_INVALID_VALUE;
235 window->counter = 0;
236 window->average_tpt = IWL_INVALID_VALUE;
237 window->stamp = 0;
238}
239
240static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
241{
242 return (ant_type & valid_antenna) == ant_type;
243}
244
245/*
246 * removes the old data from the statistics. All data that is older than
247 * TID_MAX_TIME_DIFF, will be deleted.
248 */
249static void
250iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
251{
252 /* The oldest age we want to keep */
253 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
254
255 while (tl->queue_count &&
256 (tl->time_stamp < oldest_time)) {
257 tl->total -= tl->packet_count[tl->head];
258 tl->packet_count[tl->head] = 0;
259 tl->time_stamp += TID_QUEUE_CELL_SPACING;
260 tl->queue_count--;
261 tl->head++;
262 if (tl->head >= TID_QUEUE_MAX_SIZE)
263 tl->head = 0;
264 }
265}
266
/*
 * Increment traffic load value for tid and also remove
 * any old values if passed the certain time period.
 *
 * Returns the tid whose load counter was bumped, or MAX_TID_COUNT when
 * the frame is not QoS data or the tid is out of range.
 */
static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
			   struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;
	u8 tid;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return MAX_TID_COUNT;

	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
		return MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* quantize the timestamp to the ring-buffer cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return MAX_TID_COUNT;
	}

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		iwl4965_rs_tl_rm_old_stats(tl, curr_time);

	/* bump the ring cell for this time slot */
	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[index] = tl->packet_count[index] + 1;
	tl->total = tl->total + 1;

	if ((index + 1) > tl->queue_count)
		tl->queue_count = index + 1;

	return tid;
}
320
321/*
322 get the traffic load value for tid
323*/
324static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
325{
326 u32 curr_time = jiffies_to_msecs(jiffies);
327 u32 time_diff;
328 s32 index;
329 struct iwl_traffic_load *tl = NULL;
330
331 if (tid >= TID_MAX_LOAD_COUNT)
332 return 0;
333
334 tl = &(lq_data->load[tid]);
335
336 curr_time -= curr_time % TID_ROUND_VALUE;
337
338 if (!(tl->queue_count))
339 return 0;
340
341 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
342 index = time_diff / TID_QUEUE_CELL_SPACING;
343
344 /* The history is too long: remove data that is older than */
345 /* TID_MAX_TIME_DIFF */
346 if (index >= TID_QUEUE_MAX_SIZE)
347 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
348
349 return tl->total;
350}
351
352static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
353 struct iwl_lq_sta *lq_data, u8 tid,
354 struct ieee80211_sta *sta)
355{
356 int ret = -EAGAIN;
357 u32 load;
358
359 load = iwl4965_rs_tl_get_load(lq_data, tid);
360
361 if (load > IWL_AGG_LOAD_THRESHOLD) {
362 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
363 sta->addr, tid);
364 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
365 if (ret == -EAGAIN) {
366 /*
367 * driver and mac80211 is out of sync
368 * this might be cause by reloading firmware
369 * stop the tx ba session here
370 */
371 IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
372 tid);
373 ieee80211_stop_tx_ba_session(sta, tid);
374 }
375 } else {
376 IWL_ERR(priv, "Aggregation not enabled for tid %d "
377 "because load = %u\n", tid, load);
378 }
379 return ret;
380}
381
382static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
383 struct iwl_lq_sta *lq_data,
384 struct ieee80211_sta *sta)
385{
386 if (tid < TID_MAX_LOAD_COUNT)
387 iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
388 else
389 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
390 tid, TID_MAX_LOAD_COUNT);
391}
392
393static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
394{
395 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
398}
399
400/*
401 * Static function to get the expected throughput from an iwl_scale_tbl_info
402 * that wraps a NULL pointer check
403 */
404static s32
405iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
406{
407 if (tbl->expected_tpt)
408 return tbl->expected_tpt[rs_index];
409 return 0;
410}
411
412/**
413 * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
414 *
415 * We keep a sliding window of the last 62 packets transmitted
416 * at this rate. window->data contains the bitmask of successful
417 * packets.
418 */
419static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
420 int scale_index, int attempts, int successes)
421{
422 struct iwl_rate_scale_data *window = NULL;
423 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
424 s32 fail_count, tpt;
425
426 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
427 return -EINVAL;
428
429 /* Select window for current tx bit rate */
430 window = &(tbl->win[scale_index]);
431
432 /* Get expected throughput */
433 tpt = iwl4965_get_expected_tpt(tbl, scale_index);
434
435 /*
436 * Keep track of only the latest 62 tx frame attempts in this rate's
437 * history window; anything older isn't really relevant any more.
438 * If we have filled up the sliding window, drop the oldest attempt;
439 * if the oldest attempt (highest bit in bitmap) shows "success",
440 * subtract "1" from the success counter (this is the main reason
441 * we keep these bitmaps!).
442 */
443 while (attempts > 0) {
444 if (window->counter >= IWL_RATE_MAX_WINDOW) {
445
446 /* remove earliest */
447 window->counter = IWL_RATE_MAX_WINDOW - 1;
448
449 if (window->data & mask) {
450 window->data &= ~mask;
451 window->success_counter--;
452 }
453 }
454
455 /* Increment frames-attempted counter */
456 window->counter++;
457
458 /* Shift bitmap by one frame to throw away oldest history */
459 window->data <<= 1;
460
461 /* Mark the most recent #successes attempts as successful */
462 if (successes > 0) {
463 window->success_counter++;
464 window->data |= 0x1;
465 successes--;
466 }
467
468 attempts--;
469 }
470
471 /* Calculate current success ratio, avoid divide-by-0! */
472 if (window->counter > 0)
473 window->success_ratio = 128 * (100 * window->success_counter)
474 / window->counter;
475 else
476 window->success_ratio = IWL_INVALID_VALUE;
477
478 fail_count = window->counter - window->success_counter;
479
480 /* Calculate average throughput, if we have enough history. */
481 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
482 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
483 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
484 else
485 window->average_tpt = IWL_INVALID_VALUE;
486
487 /* Tag this window as having been updated */
488 window->stamp = jiffies;
489
490 return 0;
491}
492
493/*
494 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
495 */
496static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
497 struct iwl_scale_tbl_info *tbl,
498 int index, u8 use_green)
499{
500 u32 rate_n_flags = 0;
501
502 if (is_legacy(tbl->lq_type)) {
503 rate_n_flags = iwlegacy_rates[index].plcp;
504 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
505 rate_n_flags |= RATE_MCS_CCK_MSK;
506
507 } else if (is_Ht(tbl->lq_type)) {
508 if (index > IWL_LAST_OFDM_RATE) {
509 IWL_ERR(priv, "Invalid HT rate index %d\n", index);
510 index = IWL_LAST_OFDM_RATE;
511 }
512 rate_n_flags = RATE_MCS_HT_MSK;
513
514 if (is_siso(tbl->lq_type))
515 rate_n_flags |= iwlegacy_rates[index].plcp_siso;
516 else
517 rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
518 } else {
519 IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
520 }
521
522 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
523 RATE_MCS_ANT_ABC_MSK);
524
525 if (is_Ht(tbl->lq_type)) {
526 if (tbl->is_ht40) {
527 if (tbl->is_dup)
528 rate_n_flags |= RATE_MCS_DUP_MSK;
529 else
530 rate_n_flags |= RATE_MCS_HT40_MSK;
531 }
532 if (tbl->is_SGI)
533 rate_n_flags |= RATE_MCS_SGI_MSK;
534
535 if (use_green) {
536 rate_n_flags |= RATE_MCS_GF_MSK;
537 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
538 rate_n_flags &= ~RATE_MCS_SGI_MSK;
539 IWL_ERR(priv, "GF was set with SGI:SISO\n");
540 }
541 }
542 }
543 return rate_n_flags;
544}
545
546/*
547 * Interpret uCode API's rate_n_flags format,
548 * fill "search" or "active" tx mode table.
549 */
550static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
551 enum ieee80211_band band,
552 struct iwl_scale_tbl_info *tbl,
553 int *rate_idx)
554{
555 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
556 u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
557 u8 mcs;
558
559 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
560 *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
561
562 if (*rate_idx == IWL_RATE_INVALID) {
563 *rate_idx = -1;
564 return -EINVAL;
565 }
566 tbl->is_SGI = 0; /* default legacy setup */
567 tbl->is_ht40 = 0;
568 tbl->is_dup = 0;
569 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
570 tbl->lq_type = LQ_NONE;
571 tbl->max_search = IWL_MAX_SEARCH;
572
573 /* legacy rate format */
574 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
575 if (iwl4965_num_of_ant == 1) {
576 if (band == IEEE80211_BAND_5GHZ)
577 tbl->lq_type = LQ_A;
578 else
579 tbl->lq_type = LQ_G;
580 }
581 /* HT rate format */
582 } else {
583 if (rate_n_flags & RATE_MCS_SGI_MSK)
584 tbl->is_SGI = 1;
585
586 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
587 (rate_n_flags & RATE_MCS_DUP_MSK))
588 tbl->is_ht40 = 1;
589
590 if (rate_n_flags & RATE_MCS_DUP_MSK)
591 tbl->is_dup = 1;
592
593 mcs = iwl4965_rs_extract_rate(rate_n_flags);
594
595 /* SISO */
596 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
597 if (iwl4965_num_of_ant == 1)
598 tbl->lq_type = LQ_SISO; /*else NONE*/
599 /* MIMO2 */
600 } else {
601 if (iwl4965_num_of_ant == 2)
602 tbl->lq_type = LQ_MIMO2;
603 }
604 }
605 return 0;
606}
607
608/* switch to another antenna/antennas and return 1 */
609/* if no other valid antenna found, return 0 */
610static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
611 struct iwl_scale_tbl_info *tbl)
612{
613 u8 new_ant_type;
614
615 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
616 return 0;
617
618 if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
619 return 0;
620
621 new_ant_type = ant_toggle_lookup[tbl->ant_type];
622
623 while ((new_ant_type != tbl->ant_type) &&
624 !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
625 new_ant_type = ant_toggle_lookup[new_ant_type];
626
627 if (new_ant_type == tbl->ant_type)
628 return 0;
629
630 tbl->ant_type = new_ant_type;
631 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
632 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
633 return 1;
634}
635
636/**
637 * Green-field mode is valid if the station supports it and
638 * there are no non-GF stations present in the BSS.
639 */
640static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
641{
642 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
643 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
644
645 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
646 !(ctx->ht.non_gf_sta_present);
647}
648
649/**
650 * iwl4965_rs_get_supported_rates - get the available rates
651 *
652 * if management frame or broadcast frame only return
653 * basic available rates.
654 *
655 */
656static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
657 struct ieee80211_hdr *hdr,
658 enum iwl_table_type rate_type)
659{
660 if (is_legacy(rate_type)) {
661 return lq_sta->active_legacy_rate;
662 } else {
663 if (is_siso(rate_type))
664 return lq_sta->active_siso_rate;
665 else
666 return lq_sta->active_mimo2_rate;
667 }
668}
669
670static u16
671iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
672 int rate_type)
673{
674 u8 high = IWL_RATE_INVALID;
675 u8 low = IWL_RATE_INVALID;
676
677 /* 802.11A or ht walks to the next literal adjacent rate in
678 * the rate table */
679 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
680 int i;
681 u32 mask;
682
683 /* Find the previous rate that is in the rate mask */
684 i = index - 1;
685 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
686 if (rate_mask & mask) {
687 low = i;
688 break;
689 }
690 }
691
692 /* Find the next rate that is in the rate mask */
693 i = index + 1;
694 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
695 if (rate_mask & mask) {
696 high = i;
697 break;
698 }
699 }
700
701 return (high << 8) | low;
702 }
703
704 low = index;
705 while (low != IWL_RATE_INVALID) {
706 low = iwlegacy_rates[low].prev_rs;
707 if (low == IWL_RATE_INVALID)
708 break;
709 if (rate_mask & (1 << low))
710 break;
711 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
712 }
713
714 high = index;
715 while (high != IWL_RATE_INVALID) {
716 high = iwlegacy_rates[high].next_rs;
717 if (high == IWL_RATE_INVALID)
718 break;
719 if (rate_mask & (1 << high))
720 break;
721 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
722 }
723
724 return (high << 8) | low;
725}
726
/*
 * Pick the next lower rate for @tbl, falling back from HT to legacy
 * when HT is no longer possible (or we are already at the lowest HT
 * rate). NOTE: may rewrite @tbl's lq_type/antenna/width fields as a
 * side effect of the HT->legacy downgrade. Returns the uCode
 * rate_n_flags word for the chosen rate.
 */
static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
			struct iwl_scale_tbl_info *tbl,
			u8 scale_index, u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct iwl_priv *priv = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
		switch_to_legacy = 1;
		scale_index = rs_ht_to_legacy[scale_index];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* Legacy uses a single antenna */
		if (iwl4965_num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    iwl4965_first_antenna(priv->hw_params.valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IWL_MAX_SEARCH;
	}

	rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask = (u16)(rate_mask &
			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
		low = scale_index;
		goto out;
	}

	high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
					scale_index, rate_mask,
					tbl->lq_type);
	/* low byte of high_low holds the next lower usable rate */
	low = high_low & 0xff;

	/* No lower rate available: keep the current one */
	if (low == IWL_RATE_INVALID)
		low = scale_index;

out:
	return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
787
788/*
789 * Simple function to compare two rate scale table types
790 */
791static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
792 struct iwl_scale_tbl_info *b)
793{
794 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
795 (a->is_SGI == b->is_SGI);
796}
797
798/*
799 * mac80211 sends us Tx status
800 */
801static void
802iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
803 struct ieee80211_sta *sta, void *priv_sta,
804 struct sk_buff *skb)
805{
806 int legacy_success;
807 int retries;
808 int rs_index, mac_index, i;
809 struct iwl_lq_sta *lq_sta = priv_sta;
810 struct iwl_link_quality_cmd *table;
811 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
812 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
813 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
814 enum mac80211_rate_control_flags mac_flags;
815 u32 tx_rate;
816 struct iwl_scale_tbl_info tbl_type;
817 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
818 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
819 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
820
821 IWL_DEBUG_RATE_LIMIT(priv,
822 "get frame ack response, update rate scale window\n");
823
824 /* Treat uninitialized rate scaling data same as non-existing. */
825 if (!lq_sta) {
826 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
827 return;
828 } else if (!lq_sta->drv) {
829 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
830 return;
831 }
832
833 if (!ieee80211_is_data(hdr->frame_control) ||
834 info->flags & IEEE80211_TX_CTL_NO_ACK)
835 return;
836
837 /* This packet was aggregated but doesn't carry status info */
838 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
839 !(info->flags & IEEE80211_TX_STAT_AMPDU))
840 return;
841
842 /*
843 * Ignore this Tx frame response if its initial rate doesn't match
844 * that of latest Link Quality command. There may be stragglers
845 * from a previous Link Quality command, but we're no longer interested
846 * in those; they're either from the "active" mode while we're trying
847 * to check "search" mode, or a prior "search" mode after we've moved
848 * to a new "search" mode (which might become the new "active" mode).
849 */
850 table = &lq_sta->lq;
851 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
852 iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
853 priv->band, &tbl_type, &rs_index);
854 if (priv->band == IEEE80211_BAND_5GHZ)
855 rs_index -= IWL_FIRST_OFDM_RATE;
856 mac_flags = info->status.rates[0].flags;
857 mac_index = info->status.rates[0].idx;
858 /* For HT packets, map MCS to PLCP */
859 if (mac_flags & IEEE80211_TX_RC_MCS) {
860 mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
861 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
862 mac_index++;
863 /*
864 * mac80211 HT index is always zero-indexed; we need to move
865 * HT OFDM rates after CCK rates in 2.4 GHz band
866 */
867 if (priv->band == IEEE80211_BAND_2GHZ)
868 mac_index += IWL_FIRST_OFDM_RATE;
869 }
870 /* Here we actually compare this rate to the latest LQ command */
871 if ((mac_index < 0) ||
872 (tbl_type.is_SGI !=
873 !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
874 (tbl_type.is_ht40 !=
875 !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
876 (tbl_type.is_dup !=
877 !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
878 (tbl_type.ant_type != info->antenna_sel_tx) ||
879 (!!(tx_rate & RATE_MCS_HT_MSK) !=
880 !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
881 (!!(tx_rate & RATE_MCS_GF_MSK) !=
882 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
883 (rs_index != mac_index)) {
884 IWL_DEBUG_RATE(priv,
885 "initial rate %d does not match %d (0x%x)\n",
886 mac_index, rs_index, tx_rate);
887 /*
888 * Since rates mis-match, the last LQ command may have failed.
889 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
890 * ... driver.
891 */
892 lq_sta->missed_rate_counter++;
893 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
894 lq_sta->missed_rate_counter = 0;
895 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
896 CMD_ASYNC, false);
897 }
898 /* Regardless, ignore this status info for outdated rate */
899 return;
900 } else
901 /* Rate did match, so reset the missed_rate_counter */
902 lq_sta->missed_rate_counter = 0;
903
904 /* Figure out if rate scale algorithm is in active or search table */
905 if (iwl4965_table_type_matches(&tbl_type,
906 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
907 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
908 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
909 } else if (iwl4965_table_type_matches(&tbl_type,
910 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
911 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
912 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
913 } else {
914 IWL_DEBUG_RATE(priv,
915 "Neither active nor search matches tx rate\n");
916 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
917 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
918 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
919 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
920 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
921 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
922 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
923 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
924 /*
925 * no matching table found, let's by-pass the data collection
926 * and continue to perform rate scale to find the rate table
927 */
928 iwl4965_rs_stay_in_table(lq_sta, true);
929 goto done;
930 }
931
932 /*
933 * Updating the frame history depends on whether packets were
934 * aggregated.
935 *
936 * For aggregation, all packets were transmitted at the same rate, the
937 * first index into rate scale table.
938 */
939 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
940 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
941 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
942 &rs_index);
943 iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
944 info->status.ampdu_len,
945 info->status.ampdu_ack_len);
946
947 /* Update success/fail counts if not searching for new mode */
948 if (lq_sta->stay_in_tbl) {
949 lq_sta->total_success += info->status.ampdu_ack_len;
950 lq_sta->total_failed += (info->status.ampdu_len -
951 info->status.ampdu_ack_len);
952 }
953 } else {
954 /*
955 * For legacy, update frame history with for each Tx retry.
956 */
957 retries = info->status.rates[0].count - 1;
958 /* HW doesn't send more than 15 retries */
959 retries = min(retries, 15);
960
961 /* The last transmission may have been successful */
962 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
963 /* Collect data for each rate used during failed TX attempts */
964 for (i = 0; i <= retries; ++i) {
965 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
966 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
967 &tbl_type, &rs_index);
968 /*
969 * Only collect stats if retried rate is in the same RS
970 * table as active/search.
971 */
972 if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
973 tmp_tbl = curr_tbl;
974 else if (iwl4965_table_type_matches(&tbl_type,
975 other_tbl))
976 tmp_tbl = other_tbl;
977 else
978 continue;
979 iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
980 i < retries ? 0 : legacy_success);
981 }
982
983 /* Update success/fail counts if not searching for new mode */
984 if (lq_sta->stay_in_tbl) {
985 lq_sta->total_success += legacy_success;
986 lq_sta->total_failed += retries + (1 - legacy_success);
987 }
988 }
989 /* The last TX rate is cached in lq_sta; it's set in if/else above */
990 lq_sta->last_rate_n_flags = tx_rate;
991done:
992 /* See if there's a better rate or modulation mode to try. */
993 if (sta && sta->supp_rates[sband->band])
994 iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
995}
996
997/*
998 * Begin a period of staying with a selected modulation mode.
999 * Set "stay_in_tbl" flag to prevent any mode switches.
1000 * Set frame tx success limits according to legacy vs. high-throughput,
1001 * and reset overall (spanning all rates) tx success history statistics.
1002 * These control how long we stay using same modulation mode before
1003 * searching for a new mode.
1004 */
1005static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1006 struct iwl_lq_sta *lq_sta)
1007{
1008 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1009 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1010 if (is_legacy) {
1011 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1012 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1013 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1014 } else {
1015 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1016 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1017 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1018 }
1019 lq_sta->table_count = 0;
1020 lq_sta->total_failed = 0;
1021 lq_sta->total_success = 0;
1022 lq_sta->flush_timer = jiffies;
1023 lq_sta->action_counter = 0;
1024}
1025
1026/*
1027 * Find correct throughput table for given mode of modulation
1028 */
1029static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1030 struct iwl_scale_tbl_info *tbl)
1031{
1032 /* Used to choose among HT tables */
1033 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1034
1035 /* Check for invalid LQ type */
1036 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1037 tbl->expected_tpt = expected_tpt_legacy;
1038 return;
1039 }
1040
1041 /* Legacy rates have only one table */
1042 if (is_legacy(tbl->lq_type)) {
1043 tbl->expected_tpt = expected_tpt_legacy;
1044 return;
1045 }
1046
1047 /* Choose among many HT tables depending on number of streams
1048 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1049 * status */
1050 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1051 ht_tbl_pointer = expected_tpt_siso20MHz;
1052 else if (is_siso(tbl->lq_type))
1053 ht_tbl_pointer = expected_tpt_siso40MHz;
1054 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1055 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1056 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1057 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1058
1059 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1060 tbl->expected_tpt = ht_tbl_pointer[0];
1061 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1062 tbl->expected_tpt = ht_tbl_pointer[1];
1063 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1064 tbl->expected_tpt = ht_tbl_pointer[2];
1065 else /* AGG+SGI */
1066 tbl->expected_tpt = ht_tbl_pointer[3];
1067}
1068
1069/*
1070 * Find starting rate for new "search" high-throughput mode of modulation.
1071 * Goal is to find lowest expected rate (under perfect conditions) that is
1072 * above the current measured throughput of "active" mode, to give new mode
1073 * a fair chance to prove itself without too many challenges.
1074 *
1075 * This gets called when transitioning to more aggressive modulation
1076 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1077 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1078 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1079 * bit rate will typically need to increase, but not if performance was bad.
1080 */
1081static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
1082 struct iwl_lq_sta *lq_sta,
1083 struct iwl_scale_tbl_info *tbl, /* "search" */
1084 u16 rate_mask, s8 index)
1085{
1086 /* "active" values */
1087 struct iwl_scale_tbl_info *active_tbl =
1088 &(lq_sta->lq_info[lq_sta->active_tbl]);
1089 s32 active_sr = active_tbl->win[index].success_ratio;
1090 s32 active_tpt = active_tbl->expected_tpt[index];
1091
1092 /* expected "search" throughput */
1093 s32 *tpt_tbl = tbl->expected_tpt;
1094
1095 s32 new_rate, high, low, start_hi;
1096 u16 high_low;
1097 s8 rate = index;
1098
1099 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1100
1101 for (; ;) {
1102 high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
1103 tbl->lq_type);
1104
1105 low = high_low & 0xff;
1106 high = (high_low >> 8) & 0xff;
1107
1108 /*
1109 * Lower the "search" bit rate, to give new "search" mode
1110 * approximately the same throughput as "active" if:
1111 *
1112 * 1) "Active" mode has been working modestly well (but not
1113 * great), and expected "search" throughput (under perfect
1114 * conditions) at candidate rate is above the actual
1115 * measured "active" throughput (but less than expected
1116 * "active" throughput under perfect conditions).
1117 * OR
1118 * 2) "Active" mode has been working perfectly or very well
1119 * and expected "search" throughput (under perfect
1120 * conditions) at candidate rate is above expected
1121 * "active" throughput (under perfect conditions).
1122 */
1123 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1124 ((active_sr > IWL_RATE_DECREASE_TH) &&
1125 (active_sr <= IWL_RATE_HIGH_TH) &&
1126 (tpt_tbl[rate] <= active_tpt))) ||
1127 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1128 (tpt_tbl[rate] > active_tpt))) {
1129
1130 /* (2nd or later pass)
1131 * If we've already tried to raise the rate, and are
1132 * now trying to lower it, use the higher rate. */
1133 if (start_hi != IWL_RATE_INVALID) {
1134 new_rate = start_hi;
1135 break;
1136 }
1137
1138 new_rate = rate;
1139
1140 /* Loop again with lower rate */
1141 if (low != IWL_RATE_INVALID)
1142 rate = low;
1143
1144 /* Lower rate not available, use the original */
1145 else
1146 break;
1147
1148 /* Else try to raise the "search" rate to match "active" */
1149 } else {
1150 /* (2nd or later pass)
1151 * If we've already tried to lower the rate, and are
1152 * now trying to raise it, use the lower rate. */
1153 if (new_rate != IWL_RATE_INVALID)
1154 break;
1155
1156 /* Loop again with higher rate */
1157 else if (high != IWL_RATE_INVALID) {
1158 start_hi = high;
1159 rate = high;
1160
1161 /* Higher rate not available, use the original */
1162 } else {
1163 new_rate = rate;
1164 break;
1165 }
1166 }
1167 }
1168
1169 return new_rate;
1170}
1171
1172/*
1173 * Set up search table for MIMO2
1174 */
1175static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
1176 struct iwl_lq_sta *lq_sta,
1177 struct ieee80211_conf *conf,
1178 struct ieee80211_sta *sta,
1179 struct iwl_scale_tbl_info *tbl, int index)
1180{
1181 u16 rate_mask;
1182 s32 rate;
1183 s8 is_green = lq_sta->is_green;
1184 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1185 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1186
1187 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1188 return -1;
1189
1190 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1191 == WLAN_HT_CAP_SM_PS_STATIC)
1192 return -1;
1193
1194 /* Need both Tx chains/antennas to support MIMO */
1195 if (priv->hw_params.tx_chains_num < 2)
1196 return -1;
1197
1198 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1199
1200 tbl->lq_type = LQ_MIMO2;
1201 tbl->is_dup = lq_sta->is_dup;
1202 tbl->action = 0;
1203 tbl->max_search = IWL_MAX_SEARCH;
1204 rate_mask = lq_sta->active_mimo2_rate;
1205
1206 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1207 tbl->is_ht40 = 1;
1208 else
1209 tbl->is_ht40 = 0;
1210
1211 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1212
1213 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1214
1215 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
1216 rate, rate_mask);
1217 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1218 IWL_DEBUG_RATE(priv,
1219 "Can't switch with index %d rate mask %x\n",
1220 rate, rate_mask);
1221 return -1;
1222 }
1223 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1224 tbl, rate, is_green);
1225
1226 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1227 tbl->current_rate, is_green);
1228 return 0;
1229}
1230
1231/*
1232 * Set up search table for SISO
1233 */
1234static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
1235 struct iwl_lq_sta *lq_sta,
1236 struct ieee80211_conf *conf,
1237 struct ieee80211_sta *sta,
1238 struct iwl_scale_tbl_info *tbl, int index)
1239{
1240 u16 rate_mask;
1241 u8 is_green = lq_sta->is_green;
1242 s32 rate;
1243 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1244 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1245
1246 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1247 return -1;
1248
1249 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1250
1251 tbl->is_dup = lq_sta->is_dup;
1252 tbl->lq_type = LQ_SISO;
1253 tbl->action = 0;
1254 tbl->max_search = IWL_MAX_SEARCH;
1255 rate_mask = lq_sta->active_siso_rate;
1256
1257 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1258 tbl->is_ht40 = 1;
1259 else
1260 tbl->is_ht40 = 0;
1261
1262 if (is_green)
1263 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1264
1265 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1266 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1267
1268 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1269 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1270 IWL_DEBUG_RATE(priv,
1271 "can not switch with index %d rate mask %x\n",
1272 rate, rate_mask);
1273 return -1;
1274 }
1275 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1276 tbl, rate, is_green);
1277 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1278 tbl->current_rate, is_green);
1279 return 0;
1280}
1281
1282/*
1283 * Try to switch to new modulation mode from legacy
1284 */
1285static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
1286 struct iwl_lq_sta *lq_sta,
1287 struct ieee80211_conf *conf,
1288 struct ieee80211_sta *sta,
1289 int index)
1290{
1291 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1292 struct iwl_scale_tbl_info *search_tbl =
1293 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1294 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1295 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1296 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1297 u8 start_action;
1298 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1299 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1300 int ret = 0;
1301 u8 update_search_tbl_counter = 0;
1302
1303 tbl->action = IWL_LEGACY_SWITCH_SISO;
1304
1305 start_action = tbl->action;
1306 for (; ;) {
1307 lq_sta->action_counter++;
1308 switch (tbl->action) {
1309 case IWL_LEGACY_SWITCH_ANTENNA1:
1310 case IWL_LEGACY_SWITCH_ANTENNA2:
1311 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1312
1313 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1314 tx_chains_num <= 1) ||
1315 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1316 tx_chains_num <= 2))
1317 break;
1318
1319 /* Don't change antenna if success has been great */
1320 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1321 break;
1322
1323 /* Set up search table to try other antenna */
1324 memcpy(search_tbl, tbl, sz);
1325
1326 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1327 &search_tbl->current_rate, search_tbl)) {
1328 update_search_tbl_counter = 1;
1329 iwl4965_rs_set_expected_tpt_table(lq_sta,
1330 search_tbl);
1331 goto out;
1332 }
1333 break;
1334 case IWL_LEGACY_SWITCH_SISO:
1335 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1336
1337 /* Set up search table to try SISO */
1338 memcpy(search_tbl, tbl, sz);
1339 search_tbl->is_SGI = 0;
1340 ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
1341 search_tbl, index);
1342 if (!ret) {
1343 lq_sta->action_counter = 0;
1344 goto out;
1345 }
1346
1347 break;
1348 case IWL_LEGACY_SWITCH_MIMO2_AB:
1349 case IWL_LEGACY_SWITCH_MIMO2_AC:
1350 case IWL_LEGACY_SWITCH_MIMO2_BC:
1351 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1352
1353 /* Set up search table to try MIMO */
1354 memcpy(search_tbl, tbl, sz);
1355 search_tbl->is_SGI = 0;
1356
1357 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1358 search_tbl->ant_type = ANT_AB;
1359 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1360 search_tbl->ant_type = ANT_AC;
1361 else
1362 search_tbl->ant_type = ANT_BC;
1363
1364 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1365 search_tbl->ant_type))
1366 break;
1367
1368 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1369 conf, sta,
1370 search_tbl, index);
1371 if (!ret) {
1372 lq_sta->action_counter = 0;
1373 goto out;
1374 }
1375 break;
1376 }
1377 tbl->action++;
1378 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1379 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1380
1381 if (tbl->action == start_action)
1382 break;
1383
1384 }
1385 search_tbl->lq_type = LQ_NONE;
1386 return 0;
1387
1388out:
1389 lq_sta->search_better_tbl = 1;
1390 tbl->action++;
1391 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1392 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1393 if (update_search_tbl_counter)
1394 search_tbl->action = tbl->action;
1395 return 0;
1396
1397}
1398
1399/*
1400 * Try to switch to new modulation mode from SISO
1401 */
1402static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
1403 struct iwl_lq_sta *lq_sta,
1404 struct ieee80211_conf *conf,
1405 struct ieee80211_sta *sta, int index)
1406{
1407 u8 is_green = lq_sta->is_green;
1408 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1409 struct iwl_scale_tbl_info *search_tbl =
1410 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1411 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1412 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1413 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1414 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1415 u8 start_action;
1416 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1417 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1418 u8 update_search_tbl_counter = 0;
1419 int ret;
1420
1421 start_action = tbl->action;
1422
1423 for (;;) {
1424 lq_sta->action_counter++;
1425 switch (tbl->action) {
1426 case IWL_SISO_SWITCH_ANTENNA1:
1427 case IWL_SISO_SWITCH_ANTENNA2:
1428 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1429 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1430 tx_chains_num <= 1) ||
1431 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1432 tx_chains_num <= 2))
1433 break;
1434
1435 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1436 break;
1437
1438 memcpy(search_tbl, tbl, sz);
1439 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1440 &search_tbl->current_rate, search_tbl)) {
1441 update_search_tbl_counter = 1;
1442 goto out;
1443 }
1444 break;
1445 case IWL_SISO_SWITCH_MIMO2_AB:
1446 case IWL_SISO_SWITCH_MIMO2_AC:
1447 case IWL_SISO_SWITCH_MIMO2_BC:
1448 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1449 memcpy(search_tbl, tbl, sz);
1450 search_tbl->is_SGI = 0;
1451
1452 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1453 search_tbl->ant_type = ANT_AB;
1454 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1455 search_tbl->ant_type = ANT_AC;
1456 else
1457 search_tbl->ant_type = ANT_BC;
1458
1459 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1460 search_tbl->ant_type))
1461 break;
1462
1463 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1464 conf, sta,
1465 search_tbl, index);
1466 if (!ret)
1467 goto out;
1468 break;
1469 case IWL_SISO_SWITCH_GI:
1470 if (!tbl->is_ht40 && !(ht_cap->cap &
1471 IEEE80211_HT_CAP_SGI_20))
1472 break;
1473 if (tbl->is_ht40 && !(ht_cap->cap &
1474 IEEE80211_HT_CAP_SGI_40))
1475 break;
1476
1477 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1478
1479 memcpy(search_tbl, tbl, sz);
1480 if (is_green) {
1481 if (!tbl->is_SGI)
1482 break;
1483 else
1484 IWL_ERR(priv,
1485 "SGI was set in GF+SISO\n");
1486 }
1487 search_tbl->is_SGI = !tbl->is_SGI;
1488 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1489 if (tbl->is_SGI) {
1490 s32 tpt = lq_sta->last_tpt / 100;
1491 if (tpt >= search_tbl->expected_tpt[index])
1492 break;
1493 }
1494 search_tbl->current_rate =
1495 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1496 index, is_green);
1497 update_search_tbl_counter = 1;
1498 goto out;
1499 }
1500 tbl->action++;
1501 if (tbl->action > IWL_SISO_SWITCH_GI)
1502 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1503
1504 if (tbl->action == start_action)
1505 break;
1506 }
1507 search_tbl->lq_type = LQ_NONE;
1508 return 0;
1509
1510 out:
1511 lq_sta->search_better_tbl = 1;
1512 tbl->action++;
1513 if (tbl->action > IWL_SISO_SWITCH_GI)
1514 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1515 if (update_search_tbl_counter)
1516 search_tbl->action = tbl->action;
1517
1518 return 0;
1519}
1520
1521/*
1522 * Try to switch to new modulation mode from MIMO2
1523 */
1524static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
1525 struct iwl_lq_sta *lq_sta,
1526 struct ieee80211_conf *conf,
1527 struct ieee80211_sta *sta, int index)
1528{
1529 s8 is_green = lq_sta->is_green;
1530 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1531 struct iwl_scale_tbl_info *search_tbl =
1532 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1533 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1534 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1535 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1536 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1537 u8 start_action;
1538 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1539 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1540 u8 update_search_tbl_counter = 0;
1541 int ret;
1542
1543 start_action = tbl->action;
1544 for (;;) {
1545 lq_sta->action_counter++;
1546 switch (tbl->action) {
1547 case IWL_MIMO2_SWITCH_ANTENNA1:
1548 case IWL_MIMO2_SWITCH_ANTENNA2:
1549 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
1550
1551 if (tx_chains_num <= 2)
1552 break;
1553
1554 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1555 break;
1556
1557 memcpy(search_tbl, tbl, sz);
1558 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1559 &search_tbl->current_rate, search_tbl)) {
1560 update_search_tbl_counter = 1;
1561 goto out;
1562 }
1563 break;
1564 case IWL_MIMO2_SWITCH_SISO_A:
1565 case IWL_MIMO2_SWITCH_SISO_B:
1566 case IWL_MIMO2_SWITCH_SISO_C:
1567 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1568
1569 /* Set up new search table for SISO */
1570 memcpy(search_tbl, tbl, sz);
1571
1572 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1573 search_tbl->ant_type = ANT_A;
1574 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1575 search_tbl->ant_type = ANT_B;
1576 else
1577 search_tbl->ant_type = ANT_C;
1578
1579 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1580 search_tbl->ant_type))
1581 break;
1582
1583 ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
1584 conf, sta,
1585 search_tbl, index);
1586 if (!ret)
1587 goto out;
1588
1589 break;
1590
1591 case IWL_MIMO2_SWITCH_GI:
1592 if (!tbl->is_ht40 && !(ht_cap->cap &
1593 IEEE80211_HT_CAP_SGI_20))
1594 break;
1595 if (tbl->is_ht40 && !(ht_cap->cap &
1596 IEEE80211_HT_CAP_SGI_40))
1597 break;
1598
1599 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
1600
1601 /* Set up new search table for MIMO2 */
1602 memcpy(search_tbl, tbl, sz);
1603 search_tbl->is_SGI = !tbl->is_SGI;
1604 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1605 /*
1606 * If active table already uses the fastest possible
1607 * modulation (dual stream with short guard interval),
1608 * and it's working well, there's no need to look
1609 * for a better type of modulation!
1610 */
1611 if (tbl->is_SGI) {
1612 s32 tpt = lq_sta->last_tpt / 100;
1613 if (tpt >= search_tbl->expected_tpt[index])
1614 break;
1615 }
1616 search_tbl->current_rate =
1617 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1618 index, is_green);
1619 update_search_tbl_counter = 1;
1620 goto out;
1621
1622 }
1623 tbl->action++;
1624 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1625 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1626
1627 if (tbl->action == start_action)
1628 break;
1629 }
1630 search_tbl->lq_type = LQ_NONE;
1631 return 0;
1632 out:
1633 lq_sta->search_better_tbl = 1;
1634 tbl->action++;
1635 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1636 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1637 if (update_search_tbl_counter)
1638 search_tbl->action = tbl->action;
1639
1640 return 0;
1641
1642}
1643
1644/*
1645 * Check whether we should continue using same modulation mode, or
1646 * begin search for a new mode, based on:
1647 * 1) # tx successes or failures while using this mode
1648 * 2) # times calling this function
1649 * 3) elapsed time in this mode (not used, for now)
1650 */
1651static void
1652iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1653{
1654 struct iwl_scale_tbl_info *tbl;
1655 int i;
1656 int active_tbl;
1657 int flush_interval_passed = 0;
1658 struct iwl_priv *priv;
1659
1660 priv = lq_sta->drv;
1661 active_tbl = lq_sta->active_tbl;
1662
1663 tbl = &(lq_sta->lq_info[active_tbl]);
1664
1665 /* If we've been disallowing search, see if we should now allow it */
1666 if (lq_sta->stay_in_tbl) {
1667
1668 /* Elapsed time using current modulation mode */
1669 if (lq_sta->flush_timer)
1670 flush_interval_passed =
1671 time_after(jiffies,
1672 (unsigned long)(lq_sta->flush_timer +
1673 IWL_RATE_SCALE_FLUSH_INTVL));
1674
1675 /*
1676 * Check if we should allow search for new modulation mode.
1677 * If many frames have failed or succeeded, or we've used
1678 * this same modulation for a long time, allow search, and
1679 * reset history stats that keep track of whether we should
1680 * allow a new search. Also (below) reset all bitmaps and
1681 * stats in active history.
1682 */
1683 if (force_search ||
1684 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1685 (lq_sta->total_success > lq_sta->max_success_limit) ||
1686 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1687 && (flush_interval_passed))) {
1688 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
1689 lq_sta->total_failed,
1690 lq_sta->total_success,
1691 flush_interval_passed);
1692
1693 /* Allow search for new mode */
1694 lq_sta->stay_in_tbl = 0; /* only place reset */
1695 lq_sta->total_failed = 0;
1696 lq_sta->total_success = 0;
1697 lq_sta->flush_timer = 0;
1698
1699 /*
1700 * Else if we've used this modulation mode enough repetitions
1701 * (regardless of elapsed time or success/failure), reset
1702 * history bitmaps and rate-specific stats for all rates in
1703 * active table.
1704 */
1705 } else {
1706 lq_sta->table_count++;
1707 if (lq_sta->table_count >=
1708 lq_sta->table_count_limit) {
1709 lq_sta->table_count = 0;
1710
1711 IWL_DEBUG_RATE(priv,
1712 "LQ: stay in table clear win\n");
1713 for (i = 0; i < IWL_RATE_COUNT; i++)
1714 iwl4965_rs_rate_scale_clear_window(
1715 &(tbl->win[i]));
1716 }
1717 }
1718
1719 /* If transitioning to allow "search", reset all history
1720 * bitmaps and stats in active table (this will become the new
1721 * "search" table). */
1722 if (!lq_sta->stay_in_tbl) {
1723 for (i = 0; i < IWL_RATE_COUNT; i++)
1724 iwl4965_rs_rate_scale_clear_window(
1725 &(tbl->win[i]));
1726 }
1727 }
1728}
1729
1730/*
1731 * setup rate table in uCode
1732 * return rate_n_flags as used in the table
1733 */
1734static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
1735 struct iwl_rxon_context *ctx,
1736 struct iwl_lq_sta *lq_sta,
1737 struct iwl_scale_tbl_info *tbl,
1738 int index, u8 is_green)
1739{
1740 u32 rate;
1741
1742 /* Update uCode's rate table. */
1743 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
1744 iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
1745 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1746
1747 return rate;
1748}
1749
1750/*
1751 * Do rate scaling and search for new modulation mode.
1752 */
1753static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
1754 struct sk_buff *skb,
1755 struct ieee80211_sta *sta,
1756 struct iwl_lq_sta *lq_sta)
1757{
1758 struct ieee80211_hw *hw = priv->hw;
1759 struct ieee80211_conf *conf = &hw->conf;
1760 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1761 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1762 int low = IWL_RATE_INVALID;
1763 int high = IWL_RATE_INVALID;
1764 int index;
1765 int i;
1766 struct iwl_rate_scale_data *window = NULL;
1767 int current_tpt = IWL_INVALID_VALUE;
1768 int low_tpt = IWL_INVALID_VALUE;
1769 int high_tpt = IWL_INVALID_VALUE;
1770 u32 fail_count;
1771 s8 scale_action = 0;
1772 u16 rate_mask;
1773 u8 update_lq = 0;
1774 struct iwl_scale_tbl_info *tbl, *tbl1;
1775 u16 rate_scale_index_msk = 0;
1776 u32 rate;
1777 u8 is_green = 0;
1778 u8 active_tbl = 0;
1779 u8 done_search = 0;
1780 u16 high_low;
1781 s32 sr;
1782 u8 tid = MAX_TID_COUNT;
1783 struct iwl_tid_data *tid_data;
1784 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1785 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1786
1787 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1788
1789 /* Send management frames and NO_ACK data using lowest rate. */
1790 /* TODO: this could probably be improved.. */
1791 if (!ieee80211_is_data(hdr->frame_control) ||
1792 info->flags & IEEE80211_TX_CTL_NO_ACK)
1793 return;
1794
1795 if (!sta || !lq_sta)
1796 return;
1797
1798 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1799
1800 tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
1801 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1802 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1803 if (tid_data->agg.state == IWL_AGG_OFF)
1804 lq_sta->is_agg = 0;
1805 else
1806 lq_sta->is_agg = 1;
1807 } else
1808 lq_sta->is_agg = 0;
1809
1810 /*
1811 * Select rate-scale / modulation-mode table to work with in
1812 * the rest of this function: "search" if searching for better
1813 * modulation mode, or "active" if doing rate scaling within a mode.
1814 */
1815 if (!lq_sta->search_better_tbl)
1816 active_tbl = lq_sta->active_tbl;
1817 else
1818 active_tbl = 1 - lq_sta->active_tbl;
1819
1820 tbl = &(lq_sta->lq_info[active_tbl]);
1821 if (is_legacy(tbl->lq_type))
1822 lq_sta->is_green = 0;
1823 else
1824 lq_sta->is_green = iwl4965_rs_use_green(sta);
1825 is_green = lq_sta->is_green;
1826
1827 /* current tx rate */
1828 index = lq_sta->last_txrate_idx;
1829
1830 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
1831 tbl->lq_type);
1832
1833 /* rates available for this association, and for modulation mode */
1834 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1835
1836 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
1837
1838 /* mask with station rate restriction */
1839 if (is_legacy(tbl->lq_type)) {
1840 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1841 /* supp_rates has no CCK bits in A mode */
1842 rate_scale_index_msk = (u16) (rate_mask &
1843 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
1844 else
1845 rate_scale_index_msk = (u16) (rate_mask &
1846 lq_sta->supp_rates);
1847
1848 } else
1849 rate_scale_index_msk = rate_mask;
1850
1851 if (!rate_scale_index_msk)
1852 rate_scale_index_msk = rate_mask;
1853
1854 if (!((1 << index) & rate_scale_index_msk)) {
1855 IWL_ERR(priv, "Current Rate is not valid\n");
1856 if (lq_sta->search_better_tbl) {
1857 /* revert to active table if search table is not valid*/
1858 tbl->lq_type = LQ_NONE;
1859 lq_sta->search_better_tbl = 0;
1860 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1861 /* get "active" rate info */
1862 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1863 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
1864 tbl, index, is_green);
1865 }
1866 return;
1867 }
1868
1869 /* Get expected throughput table and history window for current rate */
1870 if (!tbl->expected_tpt) {
1871 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
1872 return;
1873 }
1874
1875 /* force user max rate if set by user */
1876 if ((lq_sta->max_rate_idx != -1) &&
1877 (lq_sta->max_rate_idx < index)) {
1878 index = lq_sta->max_rate_idx;
1879 update_lq = 1;
1880 window = &(tbl->win[index]);
1881 goto lq_update;
1882 }
1883
1884 window = &(tbl->win[index]);
1885
1886 /*
1887 * If there is not enough history to calculate actual average
1888 * throughput, keep analyzing results of more tx frames, without
1889 * changing rate or mode (bypass most of the rest of this function).
1890 * Set up new rate table in uCode only if old rate is not supported
1891 * in current association (use new rate found above).
1892 */
1893 fail_count = window->counter - window->success_counter;
1894 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1895 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1896 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
1897 "for index %d\n",
1898 window->success_counter, window->counter, index);
1899
1900 /* Can't calculate this yet; not enough history */
1901 window->average_tpt = IWL_INVALID_VALUE;
1902
1903 /* Should we stay with this modulation mode,
1904 * or search for a new one? */
1905 iwl4965_rs_stay_in_table(lq_sta, false);
1906
1907 goto out;
1908 }
1909 /* Else we have enough samples; calculate estimate of
1910 * actual average throughput */
1911 if (window->average_tpt != ((window->success_ratio *
1912 tbl->expected_tpt[index] + 64) / 128)) {
1913 IWL_ERR(priv,
1914 "expected_tpt should have been calculated by now\n");
1915 window->average_tpt = ((window->success_ratio *
1916 tbl->expected_tpt[index] + 64) / 128);
1917 }
1918
1919 /* If we are searching for better modulation mode, check success. */
1920 if (lq_sta->search_better_tbl) {
1921 /* If good success, continue using the "search" mode;
1922 * no need to send new link quality command, since we're
1923 * continuing to use the setup that we've been trying. */
1924 if (window->average_tpt > lq_sta->last_tpt) {
1925
1926 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
1927 "suc=%d cur-tpt=%d old-tpt=%d\n",
1928 window->success_ratio,
1929 window->average_tpt,
1930 lq_sta->last_tpt);
1931
1932 if (!is_legacy(tbl->lq_type))
1933 lq_sta->enable_counter = 1;
1934
1935 /* Swap tables; "search" becomes "active" */
1936 lq_sta->active_tbl = active_tbl;
1937 current_tpt = window->average_tpt;
1938
1939 /* Else poor success; go back to mode in "active" table */
1940 } else {
1941
1942 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
1943 "suc=%d cur-tpt=%d old-tpt=%d\n",
1944 window->success_ratio,
1945 window->average_tpt,
1946 lq_sta->last_tpt);
1947
1948 /* Nullify "search" table */
1949 tbl->lq_type = LQ_NONE;
1950
1951 /* Revert to "active" table */
1952 active_tbl = lq_sta->active_tbl;
1953 tbl = &(lq_sta->lq_info[active_tbl]);
1954
1955 /* Revert to "active" rate and throughput info */
1956 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1957 current_tpt = lq_sta->last_tpt;
1958
1959 /* Need to set up a new rate table in uCode */
1960 update_lq = 1;
1961 }
1962
1963 /* Either way, we've made a decision; modulation mode
1964 * search is done, allow rate adjustment next time. */
1965 lq_sta->search_better_tbl = 0;
1966 done_search = 1; /* Don't switch modes below! */
1967 goto lq_update;
1968 }
1969
1970 /* (Else) not in search of better modulation mode, try for better
1971 * starting rate, while staying in this mode. */
1972 high_low = iwl4965_rs_get_adjacent_rate(priv, index,
1973 rate_scale_index_msk,
1974 tbl->lq_type);
1975 low = high_low & 0xff;
1976 high = (high_low >> 8) & 0xff;
1977
1978 /* If user set max rate, dont allow higher than user constrain */
1979 if ((lq_sta->max_rate_idx != -1) &&
1980 (lq_sta->max_rate_idx < high))
1981 high = IWL_RATE_INVALID;
1982
1983 sr = window->success_ratio;
1984
1985 /* Collect measured throughputs for current and adjacent rates */
1986 current_tpt = window->average_tpt;
1987 if (low != IWL_RATE_INVALID)
1988 low_tpt = tbl->win[low].average_tpt;
1989 if (high != IWL_RATE_INVALID)
1990 high_tpt = tbl->win[high].average_tpt;
1991
1992 scale_action = 0;
1993
1994 /* Too many failures, decrease rate */
1995 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1996 IWL_DEBUG_RATE(priv,
1997 "decrease rate because of low success_ratio\n");
1998 scale_action = -1;
1999
2000 /* No throughput measured yet for adjacent rates; try increase. */
2001 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2002 (high_tpt == IWL_INVALID_VALUE)) {
2003
2004 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2005 scale_action = 1;
2006 else if (low != IWL_RATE_INVALID)
2007 scale_action = 0;
2008 }
2009
2010 /* Both adjacent throughputs are measured, but neither one has better
2011 * throughput; we're using the best rate, don't change it! */
2012 else if ((low_tpt != IWL_INVALID_VALUE) &&
2013 (high_tpt != IWL_INVALID_VALUE) &&
2014 (low_tpt < current_tpt) &&
2015 (high_tpt < current_tpt))
2016 scale_action = 0;
2017
2018 /* At least one adjacent rate's throughput is measured,
2019 * and may have better performance. */
2020 else {
2021 /* Higher adjacent rate's throughput is measured */
2022 if (high_tpt != IWL_INVALID_VALUE) {
2023 /* Higher rate has better throughput */
2024 if (high_tpt > current_tpt &&
2025 sr >= IWL_RATE_INCREASE_TH) {
2026 scale_action = 1;
2027 } else {
2028 scale_action = 0;
2029 }
2030
2031 /* Lower adjacent rate's throughput is measured */
2032 } else if (low_tpt != IWL_INVALID_VALUE) {
2033 /* Lower rate has better throughput */
2034 if (low_tpt > current_tpt) {
2035 IWL_DEBUG_RATE(priv,
2036 "decrease rate because of low tpt\n");
2037 scale_action = -1;
2038 } else if (sr >= IWL_RATE_INCREASE_TH) {
2039 scale_action = 1;
2040 }
2041 }
2042 }
2043
2044 /* Sanity check; asked for decrease, but success rate or throughput
2045 * has been good at old rate. Don't change it. */
2046 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2047 ((sr > IWL_RATE_HIGH_TH) ||
2048 (current_tpt > (100 * tbl->expected_tpt[low]))))
2049 scale_action = 0;
2050
2051 switch (scale_action) {
2052 case -1:
2053 /* Decrease starting rate, update uCode's rate table */
2054 if (low != IWL_RATE_INVALID) {
2055 update_lq = 1;
2056 index = low;
2057 }
2058
2059 break;
2060 case 1:
2061 /* Increase starting rate, update uCode's rate table */
2062 if (high != IWL_RATE_INVALID) {
2063 update_lq = 1;
2064 index = high;
2065 }
2066
2067 break;
2068 case 0:
2069 /* No change */
2070 default:
2071 break;
2072 }
2073
2074 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2075 "high %d type %d\n",
2076 index, scale_action, low, high, tbl->lq_type);
2077
2078lq_update:
2079 /* Replace uCode's rate table for the destination station. */
2080 if (update_lq)
2081 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
2082 tbl, index, is_green);
2083
2084 /* Should we stay with this modulation mode,
2085 * or search for a new one? */
2086 iwl4965_rs_stay_in_table(lq_sta, false);
2087
2088 /*
2089 * Search for new modulation mode if we're:
2090 * 1) Not changing rates right now
2091 * 2) Not just finishing up a search
2092 * 3) Allowing a new search
2093 */
2094 if (!update_lq && !done_search &&
2095 !lq_sta->stay_in_tbl && window->counter) {
2096 /* Save current throughput to compare with "search" throughput*/
2097 lq_sta->last_tpt = current_tpt;
2098
2099 /* Select a new "search" modulation mode to try.
2100 * If one is found, set up the new "search" table. */
2101 if (is_legacy(tbl->lq_type))
2102 iwl4965_rs_move_legacy_other(priv, lq_sta,
2103 conf, sta, index);
2104 else if (is_siso(tbl->lq_type))
2105 iwl4965_rs_move_siso_to_other(priv, lq_sta,
2106 conf, sta, index);
2107 else /* (is_mimo2(tbl->lq_type)) */
2108 iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
2109 conf, sta, index);
2110
2111 /* If new "search" mode was selected, set up in uCode table */
2112 if (lq_sta->search_better_tbl) {
2113 /* Access the "search" table, clear its history. */
2114 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2115 for (i = 0; i < IWL_RATE_COUNT; i++)
2116 iwl4965_rs_rate_scale_clear_window(
2117 &(tbl->win[i]));
2118
2119 /* Use new "search" start rate */
2120 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
2121
2122 IWL_DEBUG_RATE(priv,
2123 "Switch current mcs: %X index: %d\n",
2124 tbl->current_rate, index);
2125 iwl4965_rs_fill_link_cmd(priv, lq_sta,
2126 tbl->current_rate);
2127 iwl_legacy_send_lq_cmd(priv, ctx,
2128 &lq_sta->lq, CMD_ASYNC, false);
2129 } else
2130 done_search = 1;
2131 }
2132
2133 if (done_search && !lq_sta->stay_in_tbl) {
2134 /* If the "active" (non-search) mode was legacy,
2135 * and we've tried switching antennas,
2136 * but we haven't been able to try HT modes (not available),
2137 * stay with best antenna legacy modulation for a while
2138 * before next round of mode comparisons. */
2139 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2140 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2141 lq_sta->action_counter > tbl1->max_search) {
2142 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2143 iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
2144 }
2145
2146 /* If we're in an HT mode, and all 3 mode switch actions
2147 * have been tried and compared, stay in this best modulation
2148 * mode for a while before next round of mode comparisons. */
2149 if (lq_sta->enable_counter &&
2150 (lq_sta->action_counter >= tbl1->max_search)) {
2151 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2152 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2153 (tid != MAX_TID_COUNT)) {
2154 tid_data =
2155 &priv->stations[lq_sta->lq.sta_id].tid[tid];
2156 if (tid_data->agg.state == IWL_AGG_OFF) {
2157 IWL_DEBUG_RATE(priv,
2158 "try to aggregate tid %d\n",
2159 tid);
2160 iwl4965_rs_tl_turn_on_agg(priv, tid,
2161 lq_sta, sta);
2162 }
2163 }
2164 iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
2165 }
2166 }
2167
2168out:
2169 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
2170 index, is_green);
2171 i = index;
2172 lq_sta->last_txrate_idx = i;
2173}
2174
2175/**
2176 * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
2177 *
2178 * The uCode's station table contains a table of fallback rates
2179 * for automatic fallback during transmission.
2180 *
2181 * NOTE: This sets up a default set of values. These will be replaced later
2182 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2183 * rc80211_simple.
2184 *
2185 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2186 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2187 * which requires station table entry to exist).
2188 */
2189static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
2190 struct ieee80211_conf *conf,
2191 struct ieee80211_sta *sta,
2192 struct iwl_lq_sta *lq_sta)
2193{
2194 struct iwl_scale_tbl_info *tbl;
2195 int rate_idx;
2196 int i;
2197 u32 rate;
2198 u8 use_green = iwl4965_rs_use_green(sta);
2199 u8 active_tbl = 0;
2200 u8 valid_tx_ant;
2201 struct iwl_station_priv *sta_priv;
2202 struct iwl_rxon_context *ctx;
2203
2204 if (!sta || !lq_sta)
2205 return;
2206
2207 sta_priv = (void *)sta->drv_priv;
2208 ctx = sta_priv->common.ctx;
2209
2210 i = lq_sta->last_txrate_idx;
2211
2212 valid_tx_ant = priv->hw_params.valid_tx_ant;
2213
2214 if (!lq_sta->search_better_tbl)
2215 active_tbl = lq_sta->active_tbl;
2216 else
2217 active_tbl = 1 - lq_sta->active_tbl;
2218
2219 tbl = &(lq_sta->lq_info[active_tbl]);
2220
2221 if ((i < 0) || (i >= IWL_RATE_COUNT))
2222 i = 0;
2223
2224 rate = iwlegacy_rates[i].plcp;
2225 tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
2226 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2227
2228 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2229 rate |= RATE_MCS_CCK_MSK;
2230
2231 iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2232 if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2233 iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2234
2235 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2236 tbl->current_rate = rate;
2237 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
2238 iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
2239 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2240 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2241}
2242
/*
 * mac80211 rate-control "get_rate" hook.
 *
 * Translates the driver's last chosen rate (lq_sta->last_rate_n_flags /
 * last_txrate_idx) into the mac80211 ieee80211_tx_rate format in
 * info->control.rates[0], applying the user-configured max-rate cap and
 * falling back to rate_control_send_low() for frames that should go out
 * at the lowest rate.
 */
static void
iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
			struct ieee80211_tx_rate_control *txrc)
{

	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_lq_sta *lq_sta = priv_sta;
	int rate_idx;

	IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");

	/* Get max rate if user set max rate */
	if (lq_sta) {
		lq_sta->max_rate_idx = txrc->max_rate_idx;
		/* In 5 GHz the cap indexes OFDM rates; shift into the
		 * driver's unified rate-index space */
		if ((sband->band == IEEE80211_BAND_5GHZ) &&
		    (lq_sta->max_rate_idx != -1))
			lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
		if ((lq_sta->max_rate_idx < 0) ||
		    (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
			lq_sta->max_rate_idx = -1;
	}

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (lq_sta && !lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		priv_sta = NULL;
	}

	/* Send management frames and NO_ACK data using lowest rate. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	rate_idx  = lq_sta->last_txrate_idx;

	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
		/* Convert driver rate index to mac80211 MCS index */
		rate_idx -= IWL_FIRST_OFDM_RATE;
		/* 6M and 9M shared same MCS index */
		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
		if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
							IWL_RATE_MIMO2_6M_PLCP)
			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_DUP_DATA;
		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_40_MHZ_WIDTH;
		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_GREEN_FIELD;
	} else {
		/* Check for invalid rates */
		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
				((sband->band == IEEE80211_BAND_5GHZ) &&
				 (rate_idx < IWL_FIRST_OFDM_RATE)))
			rate_idx = rate_lowest_index(sband, sta);
		/* On valid 5 GHz rate, adjust index */
		else if (sband->band == IEEE80211_BAND_5GHZ)
			rate_idx -= IWL_FIRST_OFDM_RATE;
		info->control.rates[0].flags = 0;
	}
	info->control.rates[0].idx = rate_idx;

}
2314
2315static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2316 gfp_t gfp)
2317{
2318 struct iwl_lq_sta *lq_sta;
2319 struct iwl_station_priv *sta_priv =
2320 (struct iwl_station_priv *) sta->drv_priv;
2321 struct iwl_priv *priv;
2322
2323 priv = (struct iwl_priv *)priv_rate;
2324 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2325
2326 lq_sta = &sta_priv->lq_sta;
2327
2328 return lq_sta;
2329}
2330
2331/*
2332 * Called after adding a new station to initialize rate scaling
2333 */
2334void
2335iwl4965_rs_rate_init(struct iwl_priv *priv,
2336 struct ieee80211_sta *sta,
2337 u8 sta_id)
2338{
2339 int i, j;
2340 struct ieee80211_hw *hw = priv->hw;
2341 struct ieee80211_conf *conf = &priv->hw->conf;
2342 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2343 struct iwl_station_priv *sta_priv;
2344 struct iwl_lq_sta *lq_sta;
2345 struct ieee80211_supported_band *sband;
2346
2347 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2348 lq_sta = &sta_priv->lq_sta;
2349 sband = hw->wiphy->bands[conf->channel->band];
2350
2351
2352 lq_sta->lq.sta_id = sta_id;
2353
2354 for (j = 0; j < LQ_SIZE; j++)
2355 for (i = 0; i < IWL_RATE_COUNT; i++)
2356 iwl4965_rs_rate_scale_clear_window(
2357 &lq_sta->lq_info[j].win[i]);
2358
2359 lq_sta->flush_timer = 0;
2360 lq_sta->supp_rates = sta->supp_rates[sband->band];
2361 for (j = 0; j < LQ_SIZE; j++)
2362 for (i = 0; i < IWL_RATE_COUNT; i++)
2363 iwl4965_rs_rate_scale_clear_window(
2364 &lq_sta->lq_info[j].win[i]);
2365
2366 IWL_DEBUG_RATE(priv, "LQ:"
2367 "*** rate scale station global init for station %d ***\n",
2368 sta_id);
2369 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2370 * the lowest or the highest rate.. Could consider using RSSI from
2371 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2372 * after assoc.. */
2373
2374 lq_sta->is_dup = 0;
2375 lq_sta->max_rate_idx = -1;
2376 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2377 lq_sta->is_green = iwl4965_rs_use_green(sta);
2378 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2379 lq_sta->band = priv->band;
2380 /*
2381 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2382 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2383 */
2384 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2385 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2386 lq_sta->active_siso_rate &= ~((u16)0x2);
2387 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2388
2389 /* Same here */
2390 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2391 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2392 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2393 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2394
2395 /* These values will be overridden later */
2396 lq_sta->lq.general_params.single_stream_ant_msk =
2397 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2398 lq_sta->lq.general_params.dual_stream_ant_msk =
2399 priv->hw_params.valid_tx_ant &
2400 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2401 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2402 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2403 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2404 lq_sta->lq.general_params.dual_stream_ant_msk =
2405 priv->hw_params.valid_tx_ant;
2406 }
2407
2408 /* as default allow aggregation for all tids */
2409 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2410 lq_sta->drv = priv;
2411
2412 /* Set last_txrate_idx to lowest rate */
2413 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2414 if (sband->band == IEEE80211_BAND_5GHZ)
2415 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2416 lq_sta->is_agg = 0;
2417
2418#ifdef CONFIG_MAC80211_DEBUGFS
2419 lq_sta->dbg_fixed_rate = 0;
2420#endif
2421
2422 iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
2423}
2424
/*
 * Build the uCode link quality command's retry-rate table.
 *
 * Starting from new_rate (rate_n_flags format), fill all
 * LINK_QUAL_MAX_RETRY_NUM entries of lq_cmd->rs_table.  Each rate is
 * repeated IWL_NUMBER_TRY (legacy) or IWL_HT_NUMBER_TRY (HT) times
 * before stepping down to the next lower rate; after the first step
 * down, HT rates are no longer allowed (use_ht_possible is cleared).
 *
 * priv may be NULL (the debugfs fixed-rate write path calls with NULL),
 * in which case antenna toggling is skipped.
 */
static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
			struct iwl_lq_sta *lq_sta, u32 new_rate)
{
	struct iwl_scale_tbl_info tbl_type;
	int index = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (index 0) if needed for debug purposes */
	iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

	/* Interpret new_rate (rate_n_flags) */
	iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
					  &tbl_type, &rate_idx);

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IWL_NUMBER_TRY;
	} else {
		repeat_rate = IWL_HT_NUMBER_TRY;
	}

	lq_cmd->general_params.mimo_delimiter =
			is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (index 0) */
	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

	if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
						tbl_type.ant_type;
	} else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk =
						tbl_type.ant_type;
	} /* otherwise we don't modify the existing value */

	index++;
	repeat_rate--;
	if (priv)
		valid_tx_ant = priv->hw_params.valid_tx_ant;

	/* Fill rest of rate table */
	while (index < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
			if (is_legacy(tbl_type.lq_type)) {
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (priv &&
				    iwl4965_rs_toggle_antenna(valid_tx_ant,
							&new_rate, &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

			/* Fill next table entry */
			lq_cmd->rs_table[index].rate_n_flags =
					cpu_to_le32(new_rate);
			repeat_rate--;
			index++;
		}

		/* Re-decode: antenna toggling may have changed new_rate */
		iwl4965_rs_get_tbl_info_from_mcs(new_rate,
						lq_sta->band, &tbl_type,
						&rate_idx);

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = index;

		/* Get next rate */
		new_rate = iwl4965_rs_get_lower_rate(lq_sta,
					&tbl_type, rate_idx,
					use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (priv &&
			    iwl4965_rs_toggle_antenna(valid_tx_ant,
						       &new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IWL_NUMBER_TRY;
		} else {
			repeat_rate = IWL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

		/* Fill next table entry */
		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

		index++;
		repeat_rate--;
	}

	/* Aggregation parameters: driver defaults for the uCode */
	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
2545
2546static void
2547*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2548{
2549 return hw->priv;
2550}
/* rate scale requires free function to be implemented */
static void iwl4965_rs_free(void *priv_rate)
{
	/* Nothing to release: iwl4965_rs_alloc() handed out an existing
	 * pointer (hw->priv), so this callback is intentionally empty. */
}
2556
/*
 * mac80211 rate-scaling callback: per-station teardown.
 *
 * Nothing to free here: the iwl_lq_sta returned by .alloc_sta is
 * embedded in the station's drv_priv area, so this only emits debug
 * tracing.
 */
static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
			void *priv_sta)
{
	struct iwl_priv *priv __maybe_unused = priv_r;

	IWL_DEBUG_RATE(priv, "enter\n");
	IWL_DEBUG_RATE(priv, "leave\n");
}
2565
2566
2567#ifdef CONFIG_MAC80211_DEBUGFS
/*
 * Generic debugfs open: stash the inode's private pointer (the
 * iwl_lq_sta passed to debugfs_create_file()) for the read/write
 * handlers to pick up via file->private_data.
 */
static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2573static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2574 u32 *rate_n_flags, int index)
2575{
2576 struct iwl_priv *priv;
2577 u8 valid_tx_ant;
2578 u8 ant_sel_tx;
2579
2580 priv = lq_sta->drv;
2581 valid_tx_ant = priv->hw_params.valid_tx_ant;
2582 if (lq_sta->dbg_fixed_rate) {
2583 ant_sel_tx =
2584 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
2585 >> RATE_MCS_ANT_POS);
2586 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2587 *rate_n_flags = lq_sta->dbg_fixed_rate;
2588 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2589 } else {
2590 lq_sta->dbg_fixed_rate = 0;
2591 IWL_ERR(priv,
2592 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
2593 ant_sel_tx, valid_tx_ant);
2594 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2595 }
2596 } else {
2597 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2598 }
2599}
2600
/*
 * debugfs write handler for "rate_scale_table".
 *
 * Parses a hex rate_n_flags value from userspace and installs it as
 * the station's fixed debug rate (0 or unparsable input clears it).
 * When a fixed rate is set, the active rate masks are widened and a
 * new link quality command is built and sent asynchronously.
 */
static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
			const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct iwl_lq_sta *lq_sta = file->private_data;
	struct iwl_priv *priv;
	char buf[64];
	int buf_size;
	u32 parsed_rate;
	struct iwl_station_priv *sta_priv =
	      container_of(lq_sta, struct iwl_station_priv, lq_sta);
	struct iwl_rxon_context *ctx = sta_priv->common.ctx;

	priv = lq_sta->drv;
	memset(buf, 0, sizeof(buf));
	/* leave room for the terminating NUL */
	buf_size = min(count, sizeof(buf) -  1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	/* A non-hex value simply clears the fixed rate */
	if (sscanf(buf, "%x", &parsed_rate) == 1)
		lq_sta->dbg_fixed_rate = parsed_rate;
	else
		lq_sta->dbg_fixed_rate = 0;

	/* widen the active masks so the fixed rate is always eligible */
	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */

	IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
		lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);

	if (lq_sta->dbg_fixed_rate) {
		/* priv is deliberately NULL: skips antenna toggling */
		iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
		iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
				false);
	}

	return count;
}
2639
/*
 * debugfs read handler for "rate_scale_table".
 *
 * Dumps the station's link quality state to userspace: station id,
 * success/failure counters, fixed rate, valid TX antennas, active
 * table type (legacy/HT, SISO/MIMO2, channel width, SGI/GF/AGG flags),
 * the general and aggregation parameters of the LQ command, and the
 * full retry-rate table decoded to Mbps.
 */
static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i = 0;
	int index = 0;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;
	struct iwl_priv *priv;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);

	priv = lq_sta->drv;
	/* 1KB scratch buffer; output below stays well within this */
	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
			lq_sta->total_failed, lq_sta->total_success,
			lq_sta->active_legacy_rate);
	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
			lq_sta->dbg_fixed_rate);
	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
	    (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
	    (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
	    (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
	desc += sprintf(buff+desc, "lq type %s\n",
	   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
	if (is_Ht(tbl->lq_type)) {
		desc += sprintf(buff+desc, " %s",
		   (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
		desc += sprintf(buff+desc, " %s",
		   (tbl->is_ht40) ? "40MHz" : "20MHz");
		desc += sprintf(buff+desc, " %s %s %s\n",
		   (tbl->is_SGI) ? "SGI" : "",
		   (lq_sta->is_green) ? "GF enabled" : "",
		   (lq_sta->is_agg) ? "AGG on" : "");
	}
	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
		lq_sta->last_rate_n_flags);
	desc += sprintf(buff+desc, "general:"
		"flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
		lq_sta->lq.general_params.flags,
		lq_sta->lq.general_params.mimo_delimiter,
		lq_sta->lq.general_params.single_stream_ant_msk,
		lq_sta->lq.general_params.dual_stream_ant_msk);

	desc += sprintf(buff+desc, "agg:"
			"time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
			le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
			lq_sta->lq.agg_params.agg_dis_start_th,
			lq_sta->lq.agg_params.agg_frame_cnt_limit);

	desc += sprintf(buff+desc,
			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
			lq_sta->lq.general_params.start_rate_index[0],
			lq_sta->lq.general_params.start_rate_index[1],
			lq_sta->lq.general_params.start_rate_index[2],
			lq_sta->lq.general_params.start_rate_index[3]);

	/* decode each rs_table entry back to a PLCP index for display */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		index = iwl4965_hwrate_to_plcp_idx(
			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
		if (is_legacy(tbl->lq_type)) {
			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
				i,
				le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
				iwl_rate_mcs[index].mbps);
		} else {
			desc += sprintf(buff+desc,
				" rate[%d] 0x%X %smbps (%s)\n",
				i,
				le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
				iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2723
/* debugfs "rate_scale_table": read state / write a fixed debug rate */
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
	.write = iwl4965_rs_sta_dbgfs_scale_table_write,
	.read = iwl4965_rs_sta_dbgfs_scale_table_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
/*
 * debugfs read handler for "rate_stats_table".
 *
 * For each of the LQ_SIZE rate tables, prints its flags and then the
 * full set of per-rate history windows (attempt counter, success
 * counter, success ratio).  The active table is marked "*", the
 * search table "x".
 */
static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i, j;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;

	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	for (i = 0; i < LQ_SIZE; i++) {
		desc += sprintf(buff+desc,
				"%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
				"rate=0x%X\n",
				lq_sta->active_tbl == i ? "*" : "x",
				lq_sta->lq_info[i].lq_type,
				lq_sta->lq_info[i].is_SGI,
				lq_sta->lq_info[i].is_ht40,
				lq_sta->lq_info[i].is_dup,
				lq_sta->is_green,
				lq_sta->lq_info[i].current_rate);
		for (j = 0; j < IWL_RATE_COUNT; j++) {
			desc += sprintf(buff+desc,
				"counter=%d success=%d %%=%d\n",
				lq_sta->lq_info[i].win[j].counter,
				lq_sta->lq_info[i].win[j].success_counter,
				lq_sta->lq_info[i].win[j].success_ratio);
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2767
/* debugfs "rate_stats_table": read-only per-rate history dump */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = iwl4965_rs_sta_dbgfs_stats_table_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
2773
2774static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2775 char __user *user_buf, size_t count, loff_t *ppos)
2776{
2777 char buff[120];
2778 int desc = 0;
2779 ssize_t ret;
2780
2781 struct iwl_lq_sta *lq_sta = file->private_data;
2782 struct iwl_priv *priv;
2783 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2784
2785 priv = lq_sta->drv;
2786
2787 if (is_Ht(tbl->lq_type))
2788 desc += sprintf(buff+desc,
2789 "Bit Rate= %d Mb/s\n",
2790 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2791 else
2792 desc += sprintf(buff+desc,
2793 "Bit Rate= %d Mb/s\n",
2794 iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
2795
2796 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2797 return ret;
2798}
2799
/* debugfs "rate_scale_data": read-only current bit-rate report */
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
	.read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
2805
/*
 * mac80211 .add_sta_debugfs callback: create the per-station rate-scale
 * debugfs entries under the station's directory.  The dentries are
 * stashed in lq_sta so iwl4965_rs_remove_debugfs() can remove them.
 */
static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
					struct dentry *dir)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	lq_sta->rs_sta_dbgfs_scale_table_file =
		debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	lq_sta->rs_sta_dbgfs_stats_table_file =
		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_stats_table_ops);
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
	/* tx_agg_tid_en is a plain u8 bitmask; expose it directly */
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
		&lq_sta->tx_agg_tid_en);

}
2824
/*
 * mac80211 .remove_sta_debugfs callback: tear down the entries created
 * by iwl4965_rs_add_debugfs() (debugfs_remove() is NULL-safe).
 */
static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
2833#endif
2834
2835/*
2836 * Initialization of rate scaling information is done by driver after
2837 * the station is added. Since mac80211 calls this function before a
2838 * station is added we ignore it.
2839 */
2840static void
2841iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
2842 struct ieee80211_sta *sta, void *priv_sta)
2843{
2844}
/* mac80211 rate-control hooks implementing the 4965 rate-scale algorithm */
static struct rate_control_ops rs_4965_ops = {
	.module = NULL,
	.name = IWL4965_RS_NAME,
	.tx_status = iwl4965_rs_tx_status,
	.get_rate = iwl4965_rs_get_rate,
	.rate_init = iwl4965_rs_rate_init_stub,
	.alloc = iwl4965_rs_alloc,
	.free = iwl4965_rs_free,
	.alloc_sta = iwl4965_rs_alloc_sta,
	.free_sta = iwl4965_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = iwl4965_rs_add_debugfs,
	.remove_sta_debugfs = iwl4965_rs_remove_debugfs,
#endif
};
2860
2861int iwl4965_rate_control_register(void)
2862{
2863 pr_err("Registering 4965 rate control operations\n");
2864 return ieee80211_rate_control_register(&rs_4965_ops);
2865}
2866
/* Unregister the 4965 rate-scaling algorithm from mac80211. */
void iwl4965_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_4965_ops);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
index bbd40b7dd597..b9fa2f6411a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -34,14 +34,14 @@
34 34
35#include "iwl-dev.h" 35#include "iwl-dev.h"
36#include "iwl-core.h" 36#include "iwl-core.h"
37#include "iwl-agn-calib.h" 37#include "iwl-4965-calib.h"
38#include "iwl-sta.h" 38#include "iwl-sta.h"
39#include "iwl-io.h" 39#include "iwl-io.h"
40#include "iwl-helpers.h" 40#include "iwl-helpers.h"
41#include "iwl-agn-hw.h" 41#include "iwl-4965-hw.h"
42#include "iwl-agn.h" 42#include "iwl-4965.h"
43 43
44void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 44void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
45 struct iwl_rx_mem_buffer *rxb) 45 struct iwl_rx_mem_buffer *rxb)
46 46
47{ 47{
@@ -58,14 +58,14 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
58 le32_to_cpu(missed_beacon->num_recvd_beacons), 58 le32_to_cpu(missed_beacon->num_recvd_beacons),
59 le32_to_cpu(missed_beacon->num_expected_beacons)); 59 le32_to_cpu(missed_beacon->num_expected_beacons));
60 if (!test_bit(STATUS_SCANNING, &priv->status)) 60 if (!test_bit(STATUS_SCANNING, &priv->status))
61 iwl_init_sensitivity(priv); 61 iwl4965_init_sensitivity(priv);
62 } 62 }
63} 63}
64 64
65/* Calculate noise level, based on measurements during network silence just 65/* Calculate noise level, based on measurements during network silence just
66 * before arriving beacon. This measurement can be done only if we know 66 * before arriving beacon. This measurement can be done only if we know
67 * exactly when to expect beacons, therefore only when we're associated. */ 67 * exactly when to expect beacons, therefore only when we're associated. */
68static void iwl_rx_calc_noise(struct iwl_priv *priv) 68static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
69{ 69{
70 struct statistics_rx_non_phy *rx_info; 70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0; 71 int num_active_rx = 0;
@@ -73,11 +73,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c; 73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise; 74 int last_rx_noise;
75 75
76 if (priv->cfg->bt_params && 76 rx_info = &(priv->_4965.statistics.rx.general);
77 priv->cfg->bt_params->bt_statistics)
78 rx_info = &(priv->_agn.statistics_bt.rx.general.common);
79 else
80 rx_info = &(priv->_agn.statistics.rx.general);
81 bcn_silence_a = 77 bcn_silence_a =
82 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; 78 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
83 bcn_silence_b = 79 bcn_silence_b =
@@ -109,13 +105,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
109 last_rx_noise); 105 last_rx_noise);
110} 106}
111 107
112#ifdef CONFIG_IWLWIFI_DEBUGFS 108#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
113/* 109/*
114 * based on the assumption of all statistics counter are in DWORD 110 * based on the assumption of all statistics counter are in DWORD
115 * FIXME: This function is for debugging, do not deal with 111 * FIXME: This function is for debugging, do not deal with
116 * the case of counters roll-over. 112 * the case of counters roll-over.
117 */ 113 */
118static void iwl_accumulative_statistics(struct iwl_priv *priv, 114static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
119 __le32 *stats) 115 __le32 *stats)
120{ 116{
121 int i, size; 117 int i, size;
@@ -125,28 +121,16 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
125 struct statistics_general_common *general, *accum_general; 121 struct statistics_general_common *general, *accum_general;
126 struct statistics_tx *tx, *accum_tx; 122 struct statistics_tx *tx, *accum_tx;
127 123
128 if (priv->cfg->bt_params && 124 prev_stats = (__le32 *)&priv->_4965.statistics;
129 priv->cfg->bt_params->bt_statistics) { 125 accum_stats = (u32 *)&priv->_4965.accum_statistics;
130 prev_stats = (__le32 *)&priv->_agn.statistics_bt; 126 size = sizeof(struct iwl_notif_statistics);
131 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt; 127 general = &priv->_4965.statistics.general.common;
132 size = sizeof(struct iwl_bt_notif_statistics); 128 accum_general = &priv->_4965.accum_statistics.general.common;
133 general = &priv->_agn.statistics_bt.general.common; 129 tx = &priv->_4965.statistics.tx;
134 accum_general = &priv->_agn.accum_statistics_bt.general.common; 130 accum_tx = &priv->_4965.accum_statistics.tx;
135 tx = &priv->_agn.statistics_bt.tx; 131 delta = (u32 *)&priv->_4965.delta_statistics;
136 accum_tx = &priv->_agn.accum_statistics_bt.tx; 132 max_delta = (u32 *)&priv->_4965.max_delta;
137 delta = (u32 *)&priv->_agn.delta_statistics_bt; 133
138 max_delta = (u32 *)&priv->_agn.max_delta_bt;
139 } else {
140 prev_stats = (__le32 *)&priv->_agn.statistics;
141 accum_stats = (u32 *)&priv->_agn.accum_statistics;
142 size = sizeof(struct iwl_notif_statistics);
143 general = &priv->_agn.statistics.general.common;
144 accum_general = &priv->_agn.accum_statistics.general.common;
145 tx = &priv->_agn.statistics.tx;
146 accum_tx = &priv->_agn.accum_statistics.tx;
147 delta = (u32 *)&priv->_agn.delta_statistics;
148 max_delta = (u32 *)&priv->_agn.max_delta;
149 }
150 for (i = sizeof(__le32); i < size; 134 for (i = sizeof(__le32); i < size;
151 i += sizeof(__le32), stats++, prev_stats++, delta++, 135 i += sizeof(__le32), stats++, prev_stats++, delta++,
152 max_delta++, accum_stats++) { 136 max_delta++, accum_stats++) {
@@ -161,23 +145,19 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
161 145
162 /* reset accumulative statistics for "no-counter" type statistics */ 146 /* reset accumulative statistics for "no-counter" type statistics */
163 accum_general->temperature = general->temperature; 147 accum_general->temperature = general->temperature;
164 accum_general->temperature_m = general->temperature_m;
165 accum_general->ttl_timestamp = general->ttl_timestamp; 148 accum_general->ttl_timestamp = general->ttl_timestamp;
166 accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
167 accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
168 accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
169} 149}
170#endif 150#endif
171 151
172#define REG_RECALIB_PERIOD (60) 152#define REG_RECALIB_PERIOD (60)
173 153
174/** 154/**
175 * iwl_good_plcp_health - checks for plcp error. 155 * iwl4965_good_plcp_health - checks for plcp error.
176 * 156 *
177 * When the plcp error is exceeding the thresholds, reset the radio 157 * When the plcp error is exceeding the thresholds, reset the radio
178 * to improve the throughput. 158 * to improve the throughput.
179 */ 159 */
180bool iwl_good_plcp_health(struct iwl_priv *priv, 160bool iwl4965_good_plcp_health(struct iwl_priv *priv,
181 struct iwl_rx_packet *pkt) 161 struct iwl_rx_packet *pkt)
182{ 162{
183 bool rc = true; 163 bool rc = true;
@@ -207,28 +187,15 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
207 struct statistics_rx_phy *ofdm; 187 struct statistics_rx_phy *ofdm;
208 struct statistics_rx_ht_phy *ofdm_ht; 188 struct statistics_rx_ht_phy *ofdm_ht;
209 189
210 if (priv->cfg->bt_params && 190 ofdm = &pkt->u.stats.rx.ofdm;
211 priv->cfg->bt_params->bt_statistics) { 191 ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
212 ofdm = &pkt->u.stats_bt.rx.ofdm; 192 combined_plcp_delta =
213 ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht; 193 (le32_to_cpu(ofdm->plcp_err) -
214 combined_plcp_delta = 194 le32_to_cpu(priv->_4965.statistics.
215 (le32_to_cpu(ofdm->plcp_err) - 195 rx.ofdm.plcp_err)) +
216 le32_to_cpu(priv->_agn.statistics_bt. 196 (le32_to_cpu(ofdm_ht->plcp_err) -
217 rx.ofdm.plcp_err)) + 197 le32_to_cpu(priv->_4965.statistics.
218 (le32_to_cpu(ofdm_ht->plcp_err) - 198 rx.ofdm_ht.plcp_err));
219 le32_to_cpu(priv->_agn.statistics_bt.
220 rx.ofdm_ht.plcp_err));
221 } else {
222 ofdm = &pkt->u.stats.rx.ofdm;
223 ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
224 combined_plcp_delta =
225 (le32_to_cpu(ofdm->plcp_err) -
226 le32_to_cpu(priv->_agn.statistics.
227 rx.ofdm.plcp_err)) +
228 (le32_to_cpu(ofdm_ht->plcp_err) -
229 le32_to_cpu(priv->_agn.statistics.
230 rx.ofdm_ht.plcp_err));
231 }
232 199
233 if ((combined_plcp_delta > 0) && 200 if ((combined_plcp_delta > 0) &&
234 ((combined_plcp_delta * 100) / plcp_msec) > 201 ((combined_plcp_delta * 100) / plcp_msec) >
@@ -259,58 +226,32 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
259 return rc; 226 return rc;
260} 227}
261 228
262void iwl_rx_statistics(struct iwl_priv *priv, 229void iwl4965_rx_statistics(struct iwl_priv *priv,
263 struct iwl_rx_mem_buffer *rxb) 230 struct iwl_rx_mem_buffer *rxb)
264{ 231{
265 int change; 232 int change;
266 struct iwl_rx_packet *pkt = rxb_addr(rxb); 233 struct iwl_rx_packet *pkt = rxb_addr(rxb);
267 234
268 if (priv->cfg->bt_params && 235 IWL_DEBUG_RX(priv,
269 priv->cfg->bt_params->bt_statistics) { 236 "Statistics notification received (%d vs %d).\n",
270 IWL_DEBUG_RX(priv, 237 (int)sizeof(struct iwl_notif_statistics),
271 "Statistics notification received (%d vs %d).\n", 238 le32_to_cpu(pkt->len_n_flags) &
272 (int)sizeof(struct iwl_bt_notif_statistics), 239 FH_RSCSR_FRAME_SIZE_MSK);
273 le32_to_cpu(pkt->len_n_flags) & 240
274 FH_RSCSR_FRAME_SIZE_MSK); 241 change = ((priv->_4965.statistics.general.common.temperature !=
275 242 pkt->u.stats.general.common.temperature) ||
276 change = ((priv->_agn.statistics_bt.general.common.temperature != 243 ((priv->_4965.statistics.flag &
277 pkt->u.stats_bt.general.common.temperature) || 244 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
278 ((priv->_agn.statistics_bt.flag & 245 (pkt->u.stats.flag &
279 STATISTICS_REPLY_FLG_HT40_MODE_MSK) != 246 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
280 (pkt->u.stats_bt.flag & 247#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
281 STATISTICS_REPLY_FLG_HT40_MODE_MSK))); 248 iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
282#ifdef CONFIG_IWLWIFI_DEBUGFS
283 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
284#endif 249#endif
285 250
286 } else { 251 iwl_legacy_recover_from_statistics(priv, pkt);
287 IWL_DEBUG_RX(priv,
288 "Statistics notification received (%d vs %d).\n",
289 (int)sizeof(struct iwl_notif_statistics),
290 le32_to_cpu(pkt->len_n_flags) &
291 FH_RSCSR_FRAME_SIZE_MSK);
292 252
293 change = ((priv->_agn.statistics.general.common.temperature != 253 memcpy(&priv->_4965.statistics, &pkt->u.stats,
294 pkt->u.stats.general.common.temperature) || 254 sizeof(priv->_4965.statistics));
295 ((priv->_agn.statistics.flag &
296 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
297 (pkt->u.stats.flag &
298 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
299#ifdef CONFIG_IWLWIFI_DEBUGFS
300 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
301#endif
302
303 }
304
305 iwl_recover_from_statistics(priv, pkt);
306
307 if (priv->cfg->bt_params &&
308 priv->cfg->bt_params->bt_statistics)
309 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
310 sizeof(priv->_agn.statistics_bt));
311 else
312 memcpy(&priv->_agn.statistics, &pkt->u.stats,
313 sizeof(priv->_agn.statistics));
314 255
315 set_bit(STATUS_STATISTICS, &priv->status); 256 set_bit(STATUS_STATISTICS, &priv->status);
316 257
@@ -323,34 +264,28 @@ void iwl_rx_statistics(struct iwl_priv *priv,
323 264
324 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && 265 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
325 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { 266 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
326 iwl_rx_calc_noise(priv); 267 iwl4965_rx_calc_noise(priv);
327 queue_work(priv->workqueue, &priv->run_time_calib_work); 268 queue_work(priv->workqueue, &priv->run_time_calib_work);
328 } 269 }
329 if (priv->cfg->ops->lib->temp_ops.temperature && change) 270 if (priv->cfg->ops->lib->temp_ops.temperature && change)
330 priv->cfg->ops->lib->temp_ops.temperature(priv); 271 priv->cfg->ops->lib->temp_ops.temperature(priv);
331} 272}
332 273
333void iwl_reply_statistics(struct iwl_priv *priv, 274void iwl4965_reply_statistics(struct iwl_priv *priv,
334 struct iwl_rx_mem_buffer *rxb) 275 struct iwl_rx_mem_buffer *rxb)
335{ 276{
336 struct iwl_rx_packet *pkt = rxb_addr(rxb); 277 struct iwl_rx_packet *pkt = rxb_addr(rxb);
337 278
338 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { 279 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
339#ifdef CONFIG_IWLWIFI_DEBUGFS 280#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
340 memset(&priv->_agn.accum_statistics, 0, 281 memset(&priv->_4965.accum_statistics, 0,
341 sizeof(struct iwl_notif_statistics)); 282 sizeof(struct iwl_notif_statistics));
342 memset(&priv->_agn.delta_statistics, 0, 283 memset(&priv->_4965.delta_statistics, 0,
343 sizeof(struct iwl_notif_statistics)); 284 sizeof(struct iwl_notif_statistics));
344 memset(&priv->_agn.max_delta, 0, 285 memset(&priv->_4965.max_delta, 0,
345 sizeof(struct iwl_notif_statistics)); 286 sizeof(struct iwl_notif_statistics));
346 memset(&priv->_agn.accum_statistics_bt, 0,
347 sizeof(struct iwl_bt_notif_statistics));
348 memset(&priv->_agn.delta_statistics_bt, 0,
349 sizeof(struct iwl_bt_notif_statistics));
350 memset(&priv->_agn.max_delta_bt, 0,
351 sizeof(struct iwl_bt_notif_statistics));
352#endif 287#endif
353 IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); 288 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
354 } 289 }
355 iwl_rx_statistics(priv, rxb); 290 iwl4965_rx_statistics(priv, rxb);
356} 291}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644
index 000000000000..a262c23553d2
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
@@ -0,0 +1,721 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-4965.h"
36
/*
 * iwl4965_sta_alloc_lq - allocate and pre-fill a default link-quality command
 *
 * Builds a default rate-scaling table for @sta_id: every retry slot
 * starts at the band's lowest mandatory rate (6M on 5 GHz, 1M on
 * 2.4 GHz), giving the uCode a safe starting point until the rate
 * scaling algorithm takes over.
 *
 * Returns the kzalloc'ed command (ownership passes to the caller),
 * or NULL on allocation failure.
 */
static struct iwl_link_quality_cmd *
iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
{
	int i, r;
	struct iwl_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}
	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (priv->band == IEEE80211_BAND_5GHZ)
		r = IWL_RATE_6M_INDEX;
	else
		r = IWL_RATE_1M_INDEX;

	/* CCK flag applies only to the 2.4 GHz CCK rate range */
	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Transmit on the first valid antenna */
	rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
				RATE_MCS_ANT_POS;
	rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
						   rate_flags);
	/* Same initial rate in every retry slot of the table */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
		iwl4965_first_antenna(priv->hw_params.valid_tx_ant);

	/* Dual-stream mask: all valid antennas minus the first one;
	 * fall back to A+B when that leaves nothing usable */
	link_cmd->general_params.dual_stream_ant_msk =
		priv->hw_params.valid_tx_ant &
		~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
			priv->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
88
/*
 * iwl4965_add_bssid_station - Add the special IBSS BSSID station
 *
 * @addr:     MAC address of the station to add
 * @sta_id_r: optional out-parameter for the assigned station id
 *            (left at IWL_INVALID_STATION until the add succeeds)
 *
 * Adds the station, marks it driver-local, and installs a default
 * link-quality (rate scaling) table for it.
 *
 * Function sleeps.
 */
int
iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			     const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct iwl_link_quality_cmd *link_cmd;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IWL_INVALID_STATION;

	ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].used |= IWL_STA_LOCAL;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
	if (!link_cmd) {
		IWL_ERR(priv,
			"Unable to initialize rate scaling for station %pM.\n",
			addr);
		return -ENOMEM;
	}

	ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
	if (ret)
		IWL_ERR(priv, "Link quality command failed (%d)\n", ret);

	/* Record the LQ cmd so it can be re-sent when stations are restored */
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return 0;
}
138
139static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
140 struct iwl_rxon_context *ctx,
141 bool send_if_empty)
142{
143 int i, not_empty = 0;
144 u8 buff[sizeof(struct iwl_wep_cmd) +
145 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
146 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
147 size_t cmd_size = sizeof(struct iwl_wep_cmd);
148 struct iwl_host_cmd cmd = {
149 .id = ctx->wep_key_cmd,
150 .data = wep_cmd,
151 .flags = CMD_SYNC,
152 };
153
154 might_sleep();
155
156 memset(wep_cmd, 0, cmd_size +
157 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
158
159 for (i = 0; i < WEP_KEYS_MAX ; i++) {
160 wep_cmd->key[i].key_index = i;
161 if (ctx->wep_keys[i].key_size) {
162 wep_cmd->key[i].key_offset = i;
163 not_empty = 1;
164 } else {
165 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
166 }
167
168 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
169 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
170 ctx->wep_keys[i].key_size);
171 }
172
173 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
174 wep_cmd->num_keys = WEP_KEYS_MAX;
175
176 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
177
178 cmd.len = cmd_size;
179
180 if (not_empty || send_if_empty)
181 return iwl_legacy_send_cmd(priv, &cmd);
182 else
183 return 0;
184}
185
/*
 * iwl4965_restore_default_wep_keys - re-send the static WEP key table
 *
 * Caller must hold priv->mutex. Sends nothing when no static WEP key
 * is currently configured (send_if_empty is false).
 */
int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
				 struct iwl_rxon_context *ctx)
{
	lockdep_assert_held(&priv->mutex);

	return iwl4965_static_wepkey_cmd(priv, ctx, false);
}
193
194int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx,
196 struct ieee80211_key_conf *keyconf)
197{
198 int ret;
199
200 lockdep_assert_held(&priv->mutex);
201
202 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
203 keyconf->keyidx);
204
205 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
206 if (iwl_legacy_is_rfkill(priv)) {
207 IWL_DEBUG_WEP(priv,
208 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
209 /* but keys in device are clear anyway so return success */
210 return 0;
211 }
212 ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
213 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
214 keyconf->keyidx, ret);
215
216 return ret;
217}
218
/*
 * iwl4965_set_default_wep_key - install a static (default) WEP key
 *
 * Caller must hold priv->mutex. Validates the key length, records the
 * key in the context, and pushes the full static key table to the
 * uCode. Returns -EINVAL for bad key lengths, otherwise the result of
 * sending the key command.
 */
int iwl4965_set_default_wep_key(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_key_conf *keyconf)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	/* Only 64-bit and 128-bit WEP keys are valid */
	if (keyconf->keylen != WEP_KEY_LEN_128 &&
	    keyconf->keylen != WEP_KEY_LEN_64) {
		IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	/* IV generation is left to the device, not mac80211 */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;

	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
							keyconf->keylen);

	ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
	IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
		keyconf->keylen, keyconf->keyidx, ret);

	return ret;
}
247
/*
 * iwl4965_set_wep_dynamic_key_info - program a per-station (dynamic) WEP key
 *
 * Builds the key flags, stores the key material in the driver's
 * station entry, allocates a uCode key-table slot if this station has
 * none yet, and sends the updated ADD_STA command synchronously.
 * Caller must hold priv->mutex.
 */
static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	/* Device handles WEP IV generation, mac80211 must not */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(priv->stations[sta_id].keyinfo.key,
				keyconf->key, keyconf->keylen);

	/* WEP key material starts at offset 3 in the uCode key field --
	 * NOTE(review): presumably leaves room for the IV; confirm */
	memcpy(&priv->stations[sta_id].sta.key.key[3],
				keyconf->key, keyconf->keylen);

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot under the lock; send after dropping it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
303
/*
 * iwl4965_set_ccmp_dynamic_key_info - program a pairwise CCMP (AES) key
 *
 * mac80211 keeps generating the IV (GENERATE_IV set); the key itself
 * is offloaded to the uCode via a synchronous ADD_STA command.
 * Allocates a uCode key-table slot on first use for this station.
 * Caller must hold priv->mutex.
 */
static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
	       keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
	       keyconf->keylen);

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot under the lock; send after dropping it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
354
/*
 * iwl4965_set_tkip_dynamic_key_info - set up a pairwise TKIP key
 *
 * Records key flags and material in the driver's station entry but
 * sends no ADD_STA command here: the per-packet phase-2 key is
 * delivered with each TX, and RX phase-1 material arrives later via
 * iwl4965_update_tkip_key(). Always returns 0.
 */
static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 generates the IV and the Michael MIC for TKIP */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = 16;

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;


	/* This copy is actually not needed: we get the key with each TX */
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
401
/*
 * iwl4965_update_tkip_key - push new TKIP phase-1 RX key material to uCode
 *
 * NOTE(review): presumably the mac80211 update_tkip_key callback,
 * invoked when iv32 changes -- confirm against the ops table.
 * Cancels any scan first; if the cancel fails we keep the stale key
 * and rely briefly on SW decryption. Sends the updated station entry
 * asynchronously while holding the station lock.
 */
void iwl4965_update_tkip_key(struct iwl_priv *priv,
			 struct iwl_rxon_context *ctx,
			 struct ieee80211_key_conf *keyconf,
			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (iwl_legacy_scan_cancel(priv)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
	if (sta_id == IWL_INVALID_STATION)
		return;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* copy the five 16-bit phase-1 (TTAK) words */
	for (i = 0; i < 5; i++)
		priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
			cpu_to_le16(phase1key[i]);

	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

}
437
/*
 * iwl4965_remove_dynamic_key - remove a pairwise key from uCode and driver
 *
 * Caller must hold priv->mutex. Releases the uCode key-table slot,
 * invalidates the station's key state, and sends the updated ADD_STA
 * command -- unless RF-kill is asserted, in which case the device
 * keys are already gone and success is returned.
 */
int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			struct ieee80211_key_conf *keyconf,
			u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	ctx->key_mapping_keys--;

	spin_lock_irqsave(&priv->sta_lock, flags);
	key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with index different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different index.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
			    keyconf->keyidx, key_flags);
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	/* Free the slot in the shared uCode key-table bitmap */
	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
		&priv->ucode_key_table))
		IWL_ERR(priv, "index %d not used in uCode key table.\n",
			priv->stations[sta_id].sta.key.key_offset);
	memset(&priv->stations[sta_id].keyinfo, 0,
					sizeof(struct iwl_hw_key));
	memset(&priv->stations[sta_id].sta.key, 0,
					sizeof(struct iwl4965_keyinfo));
	priv->stations[sta_id].sta.key.key_flags =
			STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (iwl_legacy_is_rfkill(priv)) {
		IWL_DEBUG_WEP(priv,
		"Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}
	/* Snapshot under the lock; send after dropping it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
502
503int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
504 struct ieee80211_key_conf *keyconf, u8 sta_id)
505{
506 int ret;
507
508 lockdep_assert_held(&priv->mutex);
509
510 ctx->key_mapping_keys++;
511 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
512
513 switch (keyconf->cipher) {
514 case WLAN_CIPHER_SUITE_CCMP:
515 ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
516 keyconf, sta_id);
517 break;
518 case WLAN_CIPHER_SUITE_TKIP:
519 ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
520 keyconf, sta_id);
521 break;
522 case WLAN_CIPHER_SUITE_WEP40:
523 case WLAN_CIPHER_SUITE_WEP104:
524 ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
525 keyconf, sta_id);
526 break;
527 default:
528 IWL_ERR(priv,
529 "Unknown alg: %s cipher = %x\n", __func__,
530 keyconf->cipher);
531 ret = -EINVAL;
532 }
533
534 IWL_DEBUG_WEP(priv,
535 "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
536 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
537 sta_id, ret);
538
539 return ret;
540}
541
/**
 * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
 *
 * This adds the broadcast station into the driver's station table
 * and marks it driver active, so that it will be restored to the
 * device at the next best time.
 *
 * Returns 0 on success, -EINVAL if the station cannot be prepared,
 * -ENOMEM if the default link-quality table cannot be allocated.
 */
int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct iwl_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
								false, NULL);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&priv->sta_lock, flags);

		return -EINVAL;
	}

	/* Driver-active + bcast: entry persists until explicitly removed */
	priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
	priv->stations[sta_id].used |= IWL_STA_BCAST;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* GFP_KERNEL allocation may sleep, so do it outside the spinlock */
	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
	if (!link_cmd) {
		IWL_ERR(priv,
			"Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return 0;
}
583
584/**
585 * iwl4965_update_bcast_station - update broadcast station's LQ command
586 *
587 * Only used by iwl4965. Placed here to have all bcast station management
588 * code together.
589 */
590static int iwl4965_update_bcast_station(struct iwl_priv *priv,
591 struct iwl_rxon_context *ctx)
592{
593 unsigned long flags;
594 struct iwl_link_quality_cmd *link_cmd;
595 u8 sta_id = ctx->bcast_sta_id;
596
597 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
598 if (!link_cmd) {
599 IWL_ERR(priv,
600 "Unable to initialize rate scaling for bcast station.\n");
601 return -ENOMEM;
602 }
603
604 spin_lock_irqsave(&priv->sta_lock, flags);
605 if (priv->stations[sta_id].lq)
606 kfree(priv->stations[sta_id].lq);
607 else
608 IWL_DEBUG_INFO(priv,
609 "Bcast station rate scaling has not been initialized yet.\n");
610 priv->stations[sta_id].lq = link_cmd;
611 spin_unlock_irqrestore(&priv->sta_lock, flags);
612
613 return 0;
614}
615
616int iwl4965_update_bcast_stations(struct iwl_priv *priv)
617{
618 struct iwl_rxon_context *ctx;
619 int ret = 0;
620
621 for_each_context(priv, ctx) {
622 ret = iwl4965_update_bcast_station(priv, ctx);
623 if (ret)
624 break;
625 }
626
627 return ret;
628}
629
/**
 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
 *
 * Clears the "disable Tx" bit for @tid in the station entry and sends
 * the updated ADD_STA command synchronously. Caller must hold
 * priv->mutex.
 */
int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
{
	unsigned long flags;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under the lock; send after dropping it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
					sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
651
/*
 * iwl4965_sta_rx_agg_start - start an RX block-ack (ADDBA) session
 *
 * Tells the uCode to begin immediate block-ack for @sta/@tid at
 * starting sequence number @ssn. Caller must hold priv->mutex.
 * Returns -ENXIO if the station is not known to the driver.
 */
int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
			int tid, u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
	priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under the lock; send after dropping it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
					sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
677
/*
 * iwl4965_sta_rx_agg_stop - tear down an RX block-ack (DELBA) session
 *
 * Tells the uCode to stop the immediate block-ack session for
 * @sta/@tid. Caller must hold priv->mutex. Returns -ENXIO if the
 * station is not known to the driver.
 */
int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
		       int tid)
{
	unsigned long flags;
	int sta_id;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under the lock; send after dropping it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
					sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
704
/*
 * iwl4965_sta_modify_sleep_tx_count - limit TX to a power-saving station
 *
 * Marks @sta_id as power-saving and tells the uCode it may release
 * @cnt buffered frames to it (NOTE(review): presumably in response to
 * PS-Poll/U-APSD -- confirm with callers). The ADD_STA command is
 * sent asynchronously while holding the station lock.
 */
void
iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.sta.modify_mask =
					STA_MODIFY_SLEEP_TX_COUNT_MSK;
	priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	iwl_legacy_send_add_sta(priv,
				&priv->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644
index 000000000000..5c40502f869a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -0,0 +1,1369 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-4965-hw.h"
41#include "iwl-4965.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
69static const u8 tid_to_ac[] = {
70 IEEE80211_AC_BE,
71 IEEE80211_AC_BK,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BE,
74 IEEE80211_AC_VI,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VO,
77 IEEE80211_AC_VO
78};
79
80static inline int iwl4965_get_ac_from_tid(u16 tid)
81{
82 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 return tid_to_ac[tid];
84
85 /* no support for TIDs 8-15 yet */
86 return -EINVAL;
87}
88
89static inline int
90iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
98
/*
 * iwl4965_tx_cmd_build_basic - fill the common fields of a REPLY_TX command
 *
 * Derives ACK/sequence/fragment flags and the PM frame timeout from
 * the frame's 802.11 header and mac80211 TX info. Rate fields are
 * handled separately by iwl4965_tx_cmd_build_rate().
 */
static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
					struct sk_buff *skb,
					struct iwl_tx_cmd *tx_cmd,
					struct ieee80211_tx_info *info,
					struct ieee80211_hdr *hdr,
					u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* only the first fragment (frag number 0) carries the TSF */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* block-ack requests want an immediate BA response */
	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: TID from the QoS control field; sequence
		 * control stays with the driver, not the uCode */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc requests get a longer PM frame timeout (3 vs 2) */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
156
157#define RTS_DFAULT_RETRY_LIMIT 60
158
159static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
160 struct iwl_tx_cmd *tx_cmd,
161 struct ieee80211_tx_info *info,
162 __le16 fc)
163{
164 u32 rate_flags;
165 int rate_idx;
166 u8 rts_retry_limit;
167 u8 data_retry_limit;
168 u8 rate_plcp;
169
170 /* Set retry limit on DATA packets and Probe Responses*/
171 if (ieee80211_is_probe_resp(fc))
172 data_retry_limit = 3;
173 else
174 data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
175 tx_cmd->data_retry_limit = data_retry_limit;
176
177 /* Set retry limit on RTS packets */
178 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
179 if (data_retry_limit < rts_retry_limit)
180 rts_retry_limit = data_retry_limit;
181 tx_cmd->rts_retry_limit = rts_retry_limit;
182
183 /* DATA packets will use the uCode station table for rate/antenna
184 * selection */
185 if (ieee80211_is_data(fc)) {
186 tx_cmd->initial_rate_index = 0;
187 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
188 return;
189 }
190
191 /**
192 * If the current TX rate stored in mac80211 has the MCS bit set, it's
193 * not really a TX rate. Thus, we use the lowest supported rate for
194 * this band. Also use the lowest supported rate if the stored rate
195 * index is invalid.
196 */
197 rate_idx = info->control.rates[0].idx;
198 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
199 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
200 rate_idx = rate_lowest_index(&priv->bands[info->band],
201 info->control.sta);
202 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
203 if (info->band == IEEE80211_BAND_5GHZ)
204 rate_idx += IWL_FIRST_OFDM_RATE;
205 /* Get PLCP rate for tx_cmd->rate_n_flags */
206 rate_plcp = iwlegacy_rates[rate_idx].plcp;
207 /* Zero out flags for this packet */
208 rate_flags = 0;
209
210 /* Set CCK flag as needed */
211 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
212 rate_flags |= RATE_MCS_CCK_MSK;
213
214 /* Set up antennas */
215 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
216 priv->hw_params.valid_tx_ant);
217
218 rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
219
220 /* Set the rate in the TX cmd */
221 tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
222}
223
/*
 * iwl4965_tx_cmd_build_hwcrypto - fill hardware-crypto fields of a TX command
 *
 * Copies key material and security flags from the mac80211 hw_key
 * attached to @info into @tx_cmd, according to the cipher in use.
 */
static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* uCode needs the per-packet phase-2 TKIP key */
		ieee80211_get_tkip_key(keyconf, skb_frag,
				IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		/* WEP key starts at offset 3 -- NOTE(review): presumably
		 * room for the IV; confirm against uCode key layout */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
266
267/*
268 * start REPLY_TX command process
269 */
270int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
271{
272 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
273 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
274 struct ieee80211_sta *sta = info->control.sta;
275 struct iwl_station_priv *sta_priv = NULL;
276 struct iwl_tx_queue *txq;
277 struct iwl_queue *q;
278 struct iwl_device_cmd *out_cmd;
279 struct iwl_cmd_meta *out_meta;
280 struct iwl_tx_cmd *tx_cmd;
281 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
282 int txq_id;
283 dma_addr_t phys_addr;
284 dma_addr_t txcmd_phys;
285 dma_addr_t scratch_phys;
286 u16 len, firstlen, secondlen;
287 u16 seq_number = 0;
288 __le16 fc;
289 u8 hdr_len;
290 u8 sta_id;
291 u8 wait_write_ptr = 0;
292 u8 tid = 0;
293 u8 *qc = NULL;
294 unsigned long flags;
295 bool is_agg = false;
296
297 if (info->control.vif)
298 ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
299
300 spin_lock_irqsave(&priv->lock, flags);
301 if (iwl_legacy_is_rfkill(priv)) {
302 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
303 goto drop_unlock;
304 }
305
306 fc = hdr->frame_control;
307
308#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
309 if (ieee80211_is_auth(fc))
310 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
311 else if (ieee80211_is_assoc_req(fc))
312 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
313 else if (ieee80211_is_reassoc_req(fc))
314 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
315#endif
316
317 hdr_len = ieee80211_hdrlen(fc);
318
319 /* Find index into station table for destination station */
320 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
321 if (sta_id == IWL_INVALID_STATION) {
322 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
323 hdr->addr1);
324 goto drop_unlock;
325 }
326
327 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
328
329 if (sta)
330 sta_priv = (void *)sta->drv_priv;
331
332 if (sta_priv && sta_priv->asleep &&
333 (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
334 /*
335 * This sends an asynchronous command to the device,
336 * but we can rely on it being processed before the
337 * next frame is processed -- and the next frame to
338 * this station is the one that will consume this
339 * counter.
340 * For now set the counter to just 1 since we do not
341 * support uAPSD yet.
342 */
343 iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
344 }
345
346 /*
347 * Send this frame after DTIM -- there's a special queue
348 * reserved for this for contexts that support AP mode.
349 */
350 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
351 txq_id = ctx->mcast_queue;
352 /*
353 * The microcode will clear the more data
354 * bit in the last frame it transmits.
355 */
356 hdr->frame_control |=
357 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
358 } else
359 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
360
361 /* irqs already disabled/saved above when locking priv->lock */
362 spin_lock(&priv->sta_lock);
363
364 if (ieee80211_is_data_qos(fc)) {
365 qc = ieee80211_get_qos_ctl(hdr);
366 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
367 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
368 spin_unlock(&priv->sta_lock);
369 goto drop_unlock;
370 }
371 seq_number = priv->stations[sta_id].tid[tid].seq_number;
372 seq_number &= IEEE80211_SCTL_SEQ;
373 hdr->seq_ctrl = hdr->seq_ctrl &
374 cpu_to_le16(IEEE80211_SCTL_FRAG);
375 hdr->seq_ctrl |= cpu_to_le16(seq_number);
376 seq_number += 0x10;
377 /* aggregation is on for this <sta,tid> */
378 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
379 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
380 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
381 is_agg = true;
382 }
383 }
384
385 txq = &priv->txq[txq_id];
386 q = &txq->q;
387
388 if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
389 spin_unlock(&priv->sta_lock);
390 goto drop_unlock;
391 }
392
393 if (ieee80211_is_data_qos(fc)) {
394 priv->stations[sta_id].tid[tid].tfds_in_queue++;
395 if (!ieee80211_has_morefrags(fc))
396 priv->stations[sta_id].tid[tid].seq_number = seq_number;
397 }
398
399 spin_unlock(&priv->sta_lock);
400
401 /* Set up driver data for this TFD */
402 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
403 txq->txb[q->write_ptr].skb = skb;
404 txq->txb[q->write_ptr].ctx = ctx;
405
406 /* Set up first empty entry in queue's array of Tx/cmd buffers */
407 out_cmd = txq->cmd[q->write_ptr];
408 out_meta = &txq->meta[q->write_ptr];
409 tx_cmd = &out_cmd->cmd.tx;
410 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
411 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
412
413 /*
414 * Set up the Tx-command (not MAC!) header.
415 * Store the chosen Tx queue and TFD index within the sequence field;
416 * after Tx, uCode's Tx response will return this value so driver can
417 * locate the frame within the tx queue and do post-tx processing.
418 */
419 out_cmd->hdr.cmd = REPLY_TX;
420 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
421 INDEX_TO_SEQ(q->write_ptr)));
422
423 /* Copy MAC header from skb into command buffer */
424 memcpy(tx_cmd->hdr, hdr, hdr_len);
425
426
427 /* Total # bytes to be transmitted */
428 len = (u16)skb->len;
429 tx_cmd->len = cpu_to_le16(len);
430
431 if (info->control.hw_key)
432 iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
433
434 /* TODO need this for burst mode later on */
435 iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
436 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
437
438 iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
439
440 iwl_legacy_update_stats(priv, true, fc, len);
441 /*
442 * Use the first empty entry in this queue's command buffer array
443 * to contain the Tx command and MAC header concatenated together
444 * (payload data will be in another buffer).
445 * Size of this varies, due to varying MAC header length.
446 * If end is not dword aligned, we'll have 2 extra bytes at the end
447 * of the MAC header (device reads on dword boundaries).
448 * We'll tell device about this padding later.
449 */
450 len = sizeof(struct iwl_tx_cmd) +
451 sizeof(struct iwl_cmd_header) + hdr_len;
452 firstlen = (len + 3) & ~3;
453
454 /* Tell NIC about any 2-byte padding after MAC header */
455 if (firstlen != len)
456 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
457
458 /* Physical address of this Tx command's header (not MAC header!),
459 * within command buffer array. */
460 txcmd_phys = pci_map_single(priv->pci_dev,
461 &out_cmd->hdr, firstlen,
462 PCI_DMA_BIDIRECTIONAL);
463 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
464 dma_unmap_len_set(out_meta, len, firstlen);
465 /* Add buffer containing Tx command and MAC(!) header to TFD's
466 * first entry */
467 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
468 txcmd_phys, firstlen, 1, 0);
469
470 if (!ieee80211_has_morefrags(hdr->frame_control)) {
471 txq->need_update = 1;
472 } else {
473 wait_write_ptr = 1;
474 txq->need_update = 0;
475 }
476
477 /* Set up TFD's 2nd entry to point directly to remainder of skb,
478 * if any (802.11 null frames have no payload). */
479 secondlen = skb->len - hdr_len;
480 if (secondlen > 0) {
481 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
482 secondlen, PCI_DMA_TODEVICE);
483 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
484 phys_addr, secondlen,
485 0, 0);
486 }
487
488 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
489 offsetof(struct iwl_tx_cmd, scratch);
490
491 /* take back ownership of DMA buffer to enable update */
492 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
493 firstlen, PCI_DMA_BIDIRECTIONAL);
494 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
495 tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
496
497 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
498 le16_to_cpu(out_cmd->hdr.sequence));
499 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
500 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
501 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
502
503 /* Set up entry for this TFD in Tx byte-count array */
504 if (info->flags & IEEE80211_TX_CTL_AMPDU)
505 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
506 le16_to_cpu(tx_cmd->len));
507
508 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
509 firstlen, PCI_DMA_BIDIRECTIONAL);
510
511 trace_iwlwifi_legacy_dev_tx(priv,
512 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
513 sizeof(struct iwl_tfd),
514 &out_cmd->hdr, firstlen,
515 skb->data + hdr_len, secondlen);
516
517 /* Tell device the write index *just past* this latest filled TFD */
518 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
519 iwl_legacy_txq_update_write_ptr(priv, txq);
520 spin_unlock_irqrestore(&priv->lock, flags);
521
522 /*
523 * At this point the frame is "transmitted" successfully
524 * and we will get a TX status notification eventually,
525 * regardless of the value of ret. "ret" only indicates
526 * whether or not we should update the write pointer.
527 */
528
529 /*
530 * Avoid atomic ops if it isn't an associated client.
531 * Also, if this is a packet for aggregation, don't
532 * increase the counter because the ucode will stop
533 * aggregation queues when their respective station
534 * goes to sleep.
535 */
536 if (sta_priv && sta_priv->client && !is_agg)
537 atomic_inc(&sta_priv->pending_frames);
538
539 if ((iwl_legacy_queue_space(q) < q->high_mark) &&
540 priv->mac80211_registered) {
541 if (wait_write_ptr) {
542 spin_lock_irqsave(&priv->lock, flags);
543 txq->need_update = 1;
544 iwl_legacy_txq_update_write_ptr(priv, txq);
545 spin_unlock_irqrestore(&priv->lock, flags);
546 } else {
547 iwl_legacy_stop_queue(priv, txq);
548 }
549 }
550
551 return 0;
552
553drop_unlock:
554 spin_unlock_irqrestore(&priv->lock, flags);
555 return -1;
556}
557
558static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
559 struct iwl_dma_ptr *ptr, size_t size)
560{
561 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
562 GFP_KERNEL);
563 if (!ptr->addr)
564 return -ENOMEM;
565 ptr->size = size;
566 return 0;
567}
568
569static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
570 struct iwl_dma_ptr *ptr)
571{
572 if (unlikely(!ptr->addr))
573 return;
574
575 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
576 memset(ptr, 0, sizeof(*ptr));
577}
578
579/**
580 * iwl4965_hw_txq_ctx_free - Free TXQ Context
581 *
582 * Destroy all TX DMA queues and structures
583 */
584void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
585{
586 int txq_id;
587
588 /* Tx queues */
589 if (priv->txq) {
590 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
591 if (txq_id == priv->cmd_queue)
592 iwl_legacy_cmd_queue_free(priv);
593 else
594 iwl_legacy_tx_queue_free(priv, txq_id);
595 }
596 iwl4965_free_dma_ptr(priv, &priv->kw);
597
598 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
599
600 /* free tx queue structure */
601 iwl_legacy_txq_mem(priv);
602}
603
604/**
605 * iwl4965_txq_ctx_alloc - allocate TX queue context
606 * Allocate all Tx DMA structures and initialize them
607 *
608 * @param priv
609 * @return error code
610 */
611int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
612{
613 int ret;
614 int txq_id, slots_num;
615 unsigned long flags;
616
617 /* Free all tx/cmd queues and keep-warm buffer */
618 iwl4965_hw_txq_ctx_free(priv);
619
620 ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
621 priv->hw_params.scd_bc_tbls_size);
622 if (ret) {
623 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
624 goto error_bc_tbls;
625 }
626 /* Alloc keep-warm buffer */
627 ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
628 if (ret) {
629 IWL_ERR(priv, "Keep Warm allocation failed\n");
630 goto error_kw;
631 }
632
633 /* allocate tx queue structure */
634 ret = iwl_legacy_alloc_txq_mem(priv);
635 if (ret)
636 goto error;
637
638 spin_lock_irqsave(&priv->lock, flags);
639
640 /* Turn off all Tx DMA fifos */
641 iwl4965_txq_set_sched(priv, 0);
642
643 /* Tell NIC where to find the "keep warm" buffer */
644 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
645
646 spin_unlock_irqrestore(&priv->lock, flags);
647
648 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
649 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
650 slots_num = (txq_id == priv->cmd_queue) ?
651 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
652 ret = iwl_legacy_tx_queue_init(priv,
653 &priv->txq[txq_id], slots_num,
654 txq_id);
655 if (ret) {
656 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
657 goto error;
658 }
659 }
660
661 return ret;
662
663 error:
664 iwl4965_hw_txq_ctx_free(priv);
665 iwl4965_free_dma_ptr(priv, &priv->kw);
666 error_kw:
667 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
668 error_bc_tbls:
669 return ret;
670}
671
672void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
673{
674 int txq_id, slots_num;
675 unsigned long flags;
676
677 spin_lock_irqsave(&priv->lock, flags);
678
679 /* Turn off all Tx DMA fifos */
680 iwl4965_txq_set_sched(priv, 0);
681
682 /* Tell NIC where to find the "keep warm" buffer */
683 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
684
685 spin_unlock_irqrestore(&priv->lock, flags);
686
687 /* Alloc and init all Tx queues, including the command queue (#4) */
688 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
689 slots_num = txq_id == priv->cmd_queue ?
690 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
691 iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
692 slots_num, txq_id);
693 }
694}
695
696/**
697 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
698 */
699void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
700{
701 int ch, txq_id;
702 unsigned long flags;
703
704 /* Turn off all Tx DMA fifos */
705 spin_lock_irqsave(&priv->lock, flags);
706
707 iwl4965_txq_set_sched(priv, 0);
708
709 /* Stop each Tx DMA channel, and wait for it to be idle */
710 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
711 iwl_legacy_write_direct32(priv,
712 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
713 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
714 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
715 1000))
716 IWL_ERR(priv, "Failing on timeout while stopping"
717 " DMA channel %d [0x%08x]", ch,
718 iwl_legacy_read_direct32(priv,
719 FH_TSSR_TX_STATUS_REG));
720 }
721 spin_unlock_irqrestore(&priv->lock, flags);
722
723 if (!priv->txq)
724 return;
725
726 /* Unmap DMA from host system and free skb's */
727 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
728 if (txq_id == priv->cmd_queue)
729 iwl_legacy_cmd_queue_unmap(priv);
730 else
731 iwl_legacy_tx_queue_unmap(priv, txq_id);
732}
733
734/*
735 * Find first available (lowest unused) Tx Queue, mark it "active".
736 * Called only when finding queue for aggregation.
737 * Should never return anything < 7, because they should already
738 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
739 */
740static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
741{
742 int txq_id;
743
744 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
745 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
746 return txq_id;
747 return -1;
748}
749
750/**
751 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
752 */
753static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
754 u16 txq_id)
755{
756 /* Simply stop the queue, but don't change any configuration;
757 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
758 iwl_legacy_write_prph(priv,
759 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
760 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
761 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
762}
763
764/**
765 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
766 */
767static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
768 u16 txq_id)
769{
770 u32 tbl_dw_addr;
771 u32 tbl_dw;
772 u16 scd_q2ratid;
773
774 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
775
776 tbl_dw_addr = priv->scd_base_addr +
777 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
778
779 tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
780
781 if (txq_id & 0x1)
782 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
783 else
784 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
785
786 iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
787
788 return 0;
789}
790
791/**
792 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
793 *
794 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
795 * i.e. it must be one of the higher queues used for aggregation
796 */
797static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
798 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
799{
800 unsigned long flags;
801 u16 ra_tid;
802 int ret;
803
804 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
805 (IWL49_FIRST_AMPDU_QUEUE +
806 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
807 IWL_WARN(priv,
808 "queue number out of range: %d, must be %d to %d\n",
809 txq_id, IWL49_FIRST_AMPDU_QUEUE,
810 IWL49_FIRST_AMPDU_QUEUE +
811 priv->cfg->base_params->num_of_ampdu_queues - 1);
812 return -EINVAL;
813 }
814
815 ra_tid = BUILD_RAxTID(sta_id, tid);
816
817 /* Modify device's station table to Tx this TID */
818 ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
819 if (ret)
820 return ret;
821
822 spin_lock_irqsave(&priv->lock, flags);
823
824 /* Stop this Tx queue before configuring it */
825 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
826
827 /* Map receiver-address / traffic-ID to this queue */
828 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
829
830 /* Set this queue as a chain-building queue */
831 iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
832
833 /* Place first TFD at index corresponding to start sequence number.
834 * Assumes that ssn_idx is valid (!= 0xFFF) */
835 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
836 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
837 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
838
839 /* Set up Tx window size and frame limit for this queue */
840 iwl_legacy_write_targ_mem(priv,
841 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
842 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
843 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
844
845 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
846 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
847 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
848 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
849
850 iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
851
852 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
853 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
854
855 spin_unlock_irqrestore(&priv->lock, flags);
856
857 return 0;
858}
859
860
861int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
862 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
863{
864 int sta_id;
865 int tx_fifo;
866 int txq_id;
867 int ret;
868 unsigned long flags;
869 struct iwl_tid_data *tid_data;
870
871 tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
872 if (unlikely(tx_fifo < 0))
873 return tx_fifo;
874
875 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
876 __func__, sta->addr, tid);
877
878 sta_id = iwl_legacy_sta_id(sta);
879 if (sta_id == IWL_INVALID_STATION) {
880 IWL_ERR(priv, "Start AGG on invalid station\n");
881 return -ENXIO;
882 }
883 if (unlikely(tid >= MAX_TID_COUNT))
884 return -EINVAL;
885
886 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
887 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
888 return -ENXIO;
889 }
890
891 txq_id = iwl4965_txq_ctx_activate_free(priv);
892 if (txq_id == -1) {
893 IWL_ERR(priv, "No free aggregation queue available\n");
894 return -ENXIO;
895 }
896
897 spin_lock_irqsave(&priv->sta_lock, flags);
898 tid_data = &priv->stations[sta_id].tid[tid];
899 *ssn = SEQ_TO_SN(tid_data->seq_number);
900 tid_data->agg.txq_id = txq_id;
901 iwl_legacy_set_swq_id(&priv->txq[txq_id],
902 iwl4965_get_ac_from_tid(tid), txq_id);
903 spin_unlock_irqrestore(&priv->sta_lock, flags);
904
905 ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
906 sta_id, tid, *ssn);
907 if (ret)
908 return ret;
909
910 spin_lock_irqsave(&priv->sta_lock, flags);
911 tid_data = &priv->stations[sta_id].tid[tid];
912 if (tid_data->tfds_in_queue == 0) {
913 IWL_DEBUG_HT(priv, "HW queue is empty\n");
914 tid_data->agg.state = IWL_AGG_ON;
915 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
916 } else {
917 IWL_DEBUG_HT(priv,
918 "HW queue is NOT empty: %d packets in HW queue\n",
919 tid_data->tfds_in_queue);
920 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
921 }
922 spin_unlock_irqrestore(&priv->sta_lock, flags);
923 return ret;
924}
925
926/**
927 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
928 * priv->lock must be held by the caller
929 */
930static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
931 u16 ssn_idx, u8 tx_fifo)
932{
933 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
934 (IWL49_FIRST_AMPDU_QUEUE +
935 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
936 IWL_WARN(priv,
937 "queue number out of range: %d, must be %d to %d\n",
938 txq_id, IWL49_FIRST_AMPDU_QUEUE,
939 IWL49_FIRST_AMPDU_QUEUE +
940 priv->cfg->base_params->num_of_ampdu_queues - 1);
941 return -EINVAL;
942 }
943
944 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
945
946 iwl_legacy_clear_bits_prph(priv,
947 IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
948
949 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
950 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
951 /* supposes that ssn_idx is valid (!= 0xFFF) */
952 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
953
954 iwl_legacy_clear_bits_prph(priv,
955 IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
956 iwl_txq_ctx_deactivate(priv, txq_id);
957 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
958
959 return 0;
960}
961
/**
 * iwl4965_tx_agg_stop - stop a TX aggregation (A-MPDU) session
 *
 * If the aggregation HW queue still holds frames, only marks the
 * session IWL_EMPTYING_HW_QUEUE_DELBA; the actual teardown then
 * happens from iwl4965_txq_check_empty() when the queue drains.
 * Otherwise disables the queue immediately and notifies mac80211.
 *
 * Locking: takes sta_lock with irqsave, then hands the saved irq
 * flags over to priv->lock (plain unlock/lock in between) -- the
 * final unlock_irqrestore releases priv->lock with the flags saved
 * at the sta_lock acquisition.  Returns 0 or a negative error code.
 */
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_legacy_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
1037
/**
 * iwl4965_txq_check_empty - advance a deferred aggregation transition
 *
 * Called (with priv->sta_lock held) after frames are reclaimed, to
 * complete a pending DELBA (disable the agg queue, tell mac80211 the
 * session stopped) or a pending ADDBA (switch the session to
 * IWL_AGG_ON, tell mac80211 it started) once the relevant queue has
 * drained.  Always returns 0.
 */
int iwl4965_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id  == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue DELBA flow\n");
			iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
1078
1079static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
1080 struct iwl_rxon_context *ctx,
1081 const u8 *addr1)
1082{
1083 struct ieee80211_sta *sta;
1084 struct iwl_station_priv *sta_priv;
1085
1086 rcu_read_lock();
1087 sta = ieee80211_find_sta(ctx->vif, addr1);
1088 if (sta) {
1089 sta_priv = (void *)sta->drv_priv;
1090 /* avoid atomic ops if this isn't a client */
1091 if (sta_priv->client &&
1092 atomic_dec_return(&sta_priv->pending_frames) == 0)
1093 ieee80211_sta_block_awake(priv->hw, sta, false);
1094 }
1095 rcu_read_unlock();
1096}
1097
1098static void
1099iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1100 bool is_agg)
1101{
1102 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1103
1104 if (!is_agg)
1105 iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1106
1107 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1108}
1109
/**
 * iwl4965_tx_queue_reclaim - free TFDs up to (and including) @index
 *
 * Walks the queue's read pointer forward to just past @index, handing
 * each completed skb to mac80211 and freeing its TFD.  Returns the
 * number of reclaimed QoS-data frames (used by the caller to adjust
 * tfds_in_queue), or 0 if @index is out of range.
 */
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	/* advance read_ptr one entry past @index, wrapping at n_bd */
	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		/* queues >= FIRST_AMPDU_QUEUE carry aggregated traffic */
		iwl4965_tx_status(priv, tx_info,
				 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		/* only QoS data frames count toward tfds_in_queue */
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb = NULL;

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
1142
1143/**
1144 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
1145 *
1146 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1147 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1148 */
1149static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1150 struct iwl_ht_agg *agg,
1151 struct iwl_compressed_ba_resp *ba_resp)
1152
1153{
1154 int i, sh, ack;
1155 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1156 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1157 int successes = 0;
1158 struct ieee80211_tx_info *info;
1159 u64 bitmap, sent_bitmap;
1160
1161 if (unlikely(!agg->wait_for_ba)) {
1162 if (unlikely(ba_resp->bitmap))
1163 IWL_ERR(priv, "Received BA when not expected\n");
1164 return -EINVAL;
1165 }
1166
1167 /* Mark that the expected block-ack response arrived */
1168 agg->wait_for_ba = 0;
1169 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
1170 ba_resp->seq_ctl);
1171
1172 /* Calculate shift to align block-ack bits with our Tx window bits */
1173 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1174 if (sh < 0) /* tbw something is wrong with indices */
1175 sh += 0x100;
1176
1177 if (agg->frame_count > (64 - sh)) {
1178 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1179 return -1;
1180 }
1181
1182 /* don't use 64-bit values for now */
1183 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1184
1185 /* check for success or failure according to the
1186 * transmitted bitmap and block-ack bitmap */
1187 sent_bitmap = bitmap & agg->bitmap;
1188
1189 /* For each frame attempted in aggregation,
1190 * update driver's record of tx frame's status. */
1191 i = 0;
1192 while (sent_bitmap) {
1193 ack = sent_bitmap & 1ULL;
1194 successes += ack;
1195 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1196 ack ? "ACK" : "NACK", i,
1197 (agg->start_idx + i) & 0xff,
1198 agg->start_idx + i);
1199 sent_bitmap >>= 1;
1200 ++i;
1201 }
1202
1203 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
1204 (unsigned long long)bitmap);
1205
1206 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1207 memset(&info->status, 0, sizeof(info->status));
1208 info->flags |= IEEE80211_TX_STAT_ACK;
1209 info->flags |= IEEE80211_TX_STAT_AMPDU;
1210 info->status.ampdu_ack_len = successes;
1211 info->status.ampdu_len = agg->frame_count;
1212 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1213
1214 return 0;
1215}
1216
1217/**
1218 * translate ucode response to mac80211 tx status control values
1219 */
1220void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1221 struct ieee80211_tx_info *info)
1222{
1223 struct ieee80211_tx_rate *r = &info->control.rates[0];
1224
1225 info->antenna_sel_tx =
1226 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1227 if (rate_n_flags & RATE_MCS_HT_MSK)
1228 r->flags |= IEEE80211_TX_RC_MCS;
1229 if (rate_n_flags & RATE_MCS_GF_MSK)
1230 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1231 if (rate_n_flags & RATE_MCS_HT40_MSK)
1232 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1233 if (rate_n_flags & RATE_MCS_DUP_MSK)
1234 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1235 if (rate_n_flags & RATE_MCS_SGI_MSK)
1236 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1237 r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1238}
1239
1240/**
1241 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1242 *
1243 * Handles block-acknowledge notification from device, which reports success
1244 * of frames sent via aggregation.
1245 */
1246void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
1247 struct iwl_rx_mem_buffer *rxb)
1248{
1249 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1250 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1251 struct iwl_tx_queue *txq = NULL;
1252 struct iwl_ht_agg *agg;
1253 int index;
1254 int sta_id;
1255 int tid;
1256 unsigned long flags;
1257
1258 /* "flow" corresponds to Tx queue */
1259 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1260
1261 /* "ssn" is start of block-ack Tx window, corresponds to index
1262 * (in Tx queue's circular buffer) of first TFD/frame in window */
1263 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1264
1265 if (scd_flow >= priv->hw_params.max_txq_num) {
1266 IWL_ERR(priv,
1267 "BUG_ON scd_flow is bigger than number of queues\n");
1268 return;
1269 }
1270
1271 txq = &priv->txq[scd_flow];
1272 sta_id = ba_resp->sta_id;
1273 tid = ba_resp->tid;
1274 agg = &priv->stations[sta_id].tid[tid].agg;
1275 if (unlikely(agg->txq_id != scd_flow)) {
1276 /*
1277 * FIXME: this is a uCode bug which need to be addressed,
1278 * log the information and return for now!
1279 * since it is possible happen very often and in order
1280 * not to fill the syslog, don't enable the logging by default
1281 */
1282 IWL_DEBUG_TX_REPLY(priv,
1283 "BA scd_flow %d does not match txq_id %d\n",
1284 scd_flow, agg->txq_id);
1285 return;
1286 }
1287
1288 /* Find index just before block-ack window */
1289 index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1290
1291 spin_lock_irqsave(&priv->sta_lock, flags);
1292
1293 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1294 "sta_id = %d\n",
1295 agg->wait_for_ba,
1296 (u8 *) &ba_resp->sta_addr_lo32,
1297 ba_resp->sta_id);
1298 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
1299 "scd_flow = "
1300 "%d, scd_ssn = %d\n",
1301 ba_resp->tid,
1302 ba_resp->seq_ctl,
1303 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1304 ba_resp->scd_flow,
1305 ba_resp->scd_ssn);
1306 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
1307 agg->start_idx,
1308 (unsigned long long)agg->bitmap);
1309
1310 /* Update driver's record of ACK vs. not for each frame in window */
1311 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1312
1313 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1314 * block-ack window (we assume that they've been successfully
1315 * transmitted ... if not, it's too late anyway). */
1316 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1317 /* calculate mac80211 ampdu sw queue to wake */
1318 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
1319 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1320
1321 if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
1322 priv->mac80211_registered &&
1323 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1324 iwl_legacy_wake_queue(priv, txq);
1325
1326 iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
1327 }
1328
1329 spin_unlock_irqrestore(&priv->sta_lock, flags);
1330}
1331
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * iwl4965_get_tx_fail_reason - map a uCode TX status code to its name
 *
 * Decodes only the TX_STATUS_MSK portion of @status; codes not listed
 * below yield "UNKNOWN".  Debug builds only.
 */
const char *iwl4965_get_tx_fail_reason(u32 status)
{
/* X-macro helpers: expand TX_STATUS_FAIL_x / TX_STATUS_POSTPONE_x into a
 * case label that returns the bare constant name as a string literal. */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644
index 000000000000..001d148feb94
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
@@ -0,0 +1,166 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-4965-calib.h"
42
43#define IWL_AC_UNSET -1
44
45/**
46 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
47 * using sample data 100 bytes apart. If these sample points are good,
48 * it's a pretty good bet that everything between them is good, too.
49 */
50static int
51iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
52{
53 u32 val;
54 int ret = 0;
55 u32 errcnt = 0;
56 u32 i;
57
58 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
59
60 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
61 /* read data comes through single port, auto-incr addr */
62 /* NOTE: Use the debugless read so we don't flood kernel log
63 * if IWL_DL_IO is set */
64 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
65 i + IWL4965_RTC_INST_LOWER_BOUND);
66 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
67 if (val != le32_to_cpu(*image)) {
68 ret = -EIO;
69 errcnt++;
70 if (errcnt >= 3)
71 break;
72 }
73 }
74
75 return ret;
76}
77
78/**
79 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
80 * looking at all data.
81 */
82static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
83 u32 len)
84{
85 u32 val;
86 u32 save_len = len;
87 int ret = 0;
88 u32 errcnt;
89
90 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
91
92 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
93 IWL4965_RTC_INST_LOWER_BOUND);
94
95 errcnt = 0;
96 for (; len > 0; len -= sizeof(u32), image++) {
97 /* read data comes through single port, auto-incr addr */
98 /* NOTE: Use the debugless read so we don't flood kernel log
99 * if IWL_DL_IO is set */
100 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
101 if (val != le32_to_cpu(*image)) {
102 IWL_ERR(priv, "uCode INST section is invalid at "
103 "offset 0x%x, is 0x%x, s/b 0x%x\n",
104 save_len - len, val, le32_to_cpu(*image));
105 ret = -EIO;
106 errcnt++;
107 if (errcnt >= 20)
108 break;
109 }
110 }
111
112 if (!errcnt)
113 IWL_DEBUG_INFO(priv,
114 "ucode image in INSTRUCTION memory is good\n");
115
116 return ret;
117}
118
119/**
120 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
121 * and verify its contents
122 */
123int iwl4965_verify_ucode(struct iwl_priv *priv)
124{
125 __le32 *image;
126 u32 len;
127 int ret;
128
129 /* Try bootstrap */
130 image = (__le32 *)priv->ucode_boot.v_addr;
131 len = priv->ucode_boot.len;
132 ret = iwl4965_verify_inst_sparse(priv, image, len);
133 if (!ret) {
134 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
135 return 0;
136 }
137
138 /* Try initialize */
139 image = (__le32 *)priv->ucode_init.v_addr;
140 len = priv->ucode_init.len;
141 ret = iwl4965_verify_inst_sparse(priv, image, len);
142 if (!ret) {
143 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
144 return 0;
145 }
146
147 /* Try runtime/protocol */
148 image = (__le32 *)priv->ucode_code.v_addr;
149 len = priv->ucode_code.len;
150 ret = iwl4965_verify_inst_sparse(priv, image, len);
151 if (!ret) {
152 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
153 return 0;
154 }
155
156 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
157
158 /* Since nothing seems to match, show first several data entries in
159 * instruction SRAM, so maybe visual inspection will give a clue.
160 * Selection of bootstrap image (vs. other images) is arbitrary. */
161 image = (__le32 *)priv->ucode_boot.v_addr;
162 len = priv->ucode_boot.len;
163 ret = iwl4965_verify_inst_full(priv, image, len);
164
165 return ret;
166}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index 91a9f5253469..f5433c74b845 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -43,12 +43,11 @@
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-agn-calib.h" 46#include "iwl-4965-calib.h"
47#include "iwl-sta.h" 47#include "iwl-sta.h"
48#include "iwl-agn-led.h" 48#include "iwl-4965-led.h"
49#include "iwl-agn.h" 49#include "iwl-4965.h"
50#include "iwl-agn-debugfs.h" 50#include "iwl-4965-debugfs.h"
51#include "iwl-legacy.h"
52 51
53static int iwl4965_send_tx_power(struct iwl_priv *priv); 52static int iwl4965_send_tx_power(struct iwl_priv *priv);
54static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 53static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -74,11 +73,11 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
74 IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); 73 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
75 74
76 /* verify BSM SRAM contents */ 75 /* verify BSM SRAM contents */
77 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); 76 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
78 for (reg = BSM_SRAM_LOWER_BOUND; 77 for (reg = BSM_SRAM_LOWER_BOUND;
79 reg < BSM_SRAM_LOWER_BOUND + len; 78 reg < BSM_SRAM_LOWER_BOUND + len;
80 reg += sizeof(u32), image++) { 79 reg += sizeof(u32), image++) {
81 val = iwl_read_prph(priv, reg); 80 val = iwl_legacy_read_prph(priv, reg);
82 if (val != le32_to_cpu(*image)) { 81 if (val != le32_to_cpu(*image)) {
83 IWL_ERR(priv, "BSM uCode verification failed at " 82 IWL_ERR(priv, "BSM uCode verification failed at "
84 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", 83 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -158,33 +157,34 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
158 inst_len = priv->ucode_init.len; 157 inst_len = priv->ucode_init.len;
159 data_len = priv->ucode_init_data.len; 158 data_len = priv->ucode_init_data.len;
160 159
161 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 160 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
162 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 161 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
163 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); 162 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
164 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); 163 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
165 164
166 /* Fill BSM memory with bootstrap instructions */ 165 /* Fill BSM memory with bootstrap instructions */
167 for (reg_offset = BSM_SRAM_LOWER_BOUND; 166 for (reg_offset = BSM_SRAM_LOWER_BOUND;
168 reg_offset < BSM_SRAM_LOWER_BOUND + len; 167 reg_offset < BSM_SRAM_LOWER_BOUND + len;
169 reg_offset += sizeof(u32), image++) 168 reg_offset += sizeof(u32), image++)
170 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image)); 169 _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
171 170
172 ret = iwl4965_verify_bsm(priv); 171 ret = iwl4965_verify_bsm(priv);
173 if (ret) 172 if (ret)
174 return ret; 173 return ret;
175 174
176 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ 175 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
177 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); 176 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
178 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); 177 iwl_legacy_write_prph(priv,
179 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); 178 BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
179 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
180 180
181 /* Load bootstrap code into instruction SRAM now, 181 /* Load bootstrap code into instruction SRAM now,
182 * to prepare to load "initialize" uCode */ 182 * to prepare to load "initialize" uCode */
183 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); 183 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
184 184
185 /* Wait for load of bootstrap uCode to finish */ 185 /* Wait for load of bootstrap uCode to finish */
186 for (i = 0; i < 100; i++) { 186 for (i = 0; i < 100; i++) {
187 done = iwl_read_prph(priv, BSM_WR_CTRL_REG); 187 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
188 if (!(done & BSM_WR_CTRL_REG_BIT_START)) 188 if (!(done & BSM_WR_CTRL_REG_BIT_START))
189 break; 189 break;
190 udelay(10); 190 udelay(10);
@@ -198,7 +198,8 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
198 198
199 /* Enable future boot loads whenever power management unit triggers it 199 /* Enable future boot loads whenever power management unit triggers it
200 * (e.g. when powering back up after power-save shutdown) */ 200 * (e.g. when powering back up after power-save shutdown) */
201 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); 201 iwl_legacy_write_prph(priv,
202 BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
202 203
203 204
204 return 0; 205 return 0;
@@ -224,14 +225,14 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
224 pdata = priv->ucode_data_backup.p_addr >> 4; 225 pdata = priv->ucode_data_backup.p_addr >> 4;
225 226
226 /* Tell bootstrap uCode where to find image to load */ 227 /* Tell bootstrap uCode where to find image to load */
227 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 228 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
228 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 229 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
229 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 230 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
230 priv->ucode_data.len); 231 priv->ucode_data.len);
231 232
232 /* Inst byte count must be last to set up, bit 31 signals uCode 233 /* Inst byte count must be last to set up, bit 31 signals uCode
233 * that all new ptr/size info is in place */ 234 * that all new ptr/size info is in place */
234 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 235 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
235 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 236 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
236 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); 237 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
237 238
@@ -251,18 +252,10 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
251*/ 252*/
252static void iwl4965_init_alive_start(struct iwl_priv *priv) 253static void iwl4965_init_alive_start(struct iwl_priv *priv)
253{ 254{
254 /* Check alive response for "valid" sign from uCode */
255 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
256 /* We had an error bringing up the hardware, so take it
257 * all the way back down so we can try again */
258 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
259 goto restart;
260 }
261
262 /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 255 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
263 * This is a paranoid check, because we would not have gotten the 256 * This is a paranoid check, because we would not have gotten the
264 * "initialize" alive if code weren't properly loaded. */ 257 * "initialize" alive if code weren't properly loaded. */
265 if (iwl_verify_ucode(priv)) { 258 if (iwl4965_verify_ucode(priv)) {
266 /* Runtime instruction load was bad; 259 /* Runtime instruction load was bad;
267 * take it all the way back down so we can try again */ 260 * take it all the way back down so we can try again */
268 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); 261 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
@@ -288,7 +281,7 @@ restart:
288 queue_work(priv->workqueue, &priv->restart); 281 queue_work(priv->workqueue, &priv->restart);
289} 282}
290 283
291static bool is_ht40_channel(__le32 rxon_flags) 284static bool iw4965_is_ht40_channel(__le32 rxon_flags)
292{ 285{
293 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) 286 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
294 >> RXON_FLG_CHANNEL_MODE_POS; 287 >> RXON_FLG_CHANNEL_MODE_POS;
@@ -296,23 +289,6 @@ static bool is_ht40_channel(__le32 rxon_flags)
296 (chan_mod == CHANNEL_MODE_MIXED)); 289 (chan_mod == CHANNEL_MODE_MIXED));
297} 290}
298 291
299/*
300 * EEPROM handlers
301 */
302static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
303{
304 return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
305}
306
307/*
308 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
309 * must be called under priv->lock and mac access
310 */
311static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
312{
313 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
314}
315
316static void iwl4965_nic_config(struct iwl_priv *priv) 292static void iwl4965_nic_config(struct iwl_priv *priv)
317{ 293{
318 unsigned long flags; 294 unsigned long flags;
@@ -320,22 +296,23 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
320 296
321 spin_lock_irqsave(&priv->lock, flags); 297 spin_lock_irqsave(&priv->lock, flags);
322 298
323 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 299 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
324 300
325 /* write radio config values to register */ 301 /* write radio config values to register */
326 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) 302 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
327 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 303 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
328 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | 304 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
329 EEPROM_RF_CFG_STEP_MSK(radio_cfg) | 305 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
330 EEPROM_RF_CFG_DASH_MSK(radio_cfg)); 306 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
331 307
332 /* set CSR_HW_CONFIG_REG for uCode use */ 308 /* set CSR_HW_CONFIG_REG for uCode use */
333 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 309 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
334 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 310 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
335 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 311 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
336 312
337 priv->calib_info = (struct iwl_eeprom_calib_info *) 313 priv->calib_info = (struct iwl_eeprom_calib_info *)
338 iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET); 314 iwl_legacy_eeprom_query_addr(priv,
315 EEPROM_4965_CALIB_TXPOWER_OFFSET);
339 316
340 spin_unlock_irqrestore(&priv->lock, flags); 317 spin_unlock_irqrestore(&priv->lock, flags);
341} 318}
@@ -348,7 +325,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
348 struct iwl_chain_noise_data *data = &(priv->chain_noise_data); 325 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
349 326
350 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 327 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
351 iwl_is_any_associated(priv)) { 328 iwl_legacy_is_any_associated(priv)) {
352 struct iwl_calib_diff_gain_cmd cmd; 329 struct iwl_calib_diff_gain_cmd cmd;
353 330
354 /* clear data for chain noise calibration algorithm */ 331 /* clear data for chain noise calibration algorithm */
@@ -365,7 +342,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
365 cmd.diff_gain_a = 0; 342 cmd.diff_gain_a = 0;
366 cmd.diff_gain_b = 0; 343 cmd.diff_gain_b = 0;
367 cmd.diff_gain_c = 0; 344 cmd.diff_gain_c = 0;
368 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 345 if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
369 sizeof(cmd), &cmd)) 346 sizeof(cmd), &cmd))
370 IWL_ERR(priv, 347 IWL_ERR(priv,
371 "Could not send REPLY_PHY_CALIBRATION_CMD\n"); 348 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
@@ -374,237 +351,6 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
374 } 351 }
375} 352}
376 353
377static void iwl4965_gain_computation(struct iwl_priv *priv,
378 u32 *average_noise,
379 u16 min_average_noise_antenna_i,
380 u32 min_average_noise,
381 u8 default_chain)
382{
383 int i, ret;
384 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
385
386 data->delta_gain_code[min_average_noise_antenna_i] = 0;
387
388 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
389 s32 delta_g = 0;
390
391 if (!(data->disconn_array[i]) &&
392 (data->delta_gain_code[i] ==
393 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
394 delta_g = average_noise[i] - min_average_noise;
395 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
396 data->delta_gain_code[i] =
397 min(data->delta_gain_code[i],
398 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
399
400 data->delta_gain_code[i] =
401 (data->delta_gain_code[i] | (1 << 2));
402 } else {
403 data->delta_gain_code[i] = 0;
404 }
405 }
406 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
407 data->delta_gain_code[0],
408 data->delta_gain_code[1],
409 data->delta_gain_code[2]);
410
411 /* Differential gain gets sent to uCode only once */
412 if (!data->radio_write) {
413 struct iwl_calib_diff_gain_cmd cmd;
414 data->radio_write = 1;
415
416 memset(&cmd, 0, sizeof(cmd));
417 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
418 cmd.diff_gain_a = data->delta_gain_code[0];
419 cmd.diff_gain_b = data->delta_gain_code[1];
420 cmd.diff_gain_c = data->delta_gain_code[2];
421 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
422 sizeof(cmd), &cmd);
423 if (ret)
424 IWL_DEBUG_CALIB(priv, "fail sending cmd "
425 "REPLY_PHY_CALIBRATION_CMD\n");
426
427 /* TODO we might want recalculate
428 * rx_chain in rxon cmd */
429
430 /* Mark so we run this algo only once! */
431 data->state = IWL_CHAIN_NOISE_CALIBRATED;
432 }
433}
434
435static void iwl4965_bg_txpower_work(struct work_struct *work)
436{
437 struct iwl_priv *priv = container_of(work, struct iwl_priv,
438 txpower_work);
439
440 /* If a scan happened to start before we got here
441 * then just return; the statistics notification will
442 * kick off another scheduled work to compensate for
443 * any temperature delta we missed here. */
444 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
445 test_bit(STATUS_SCANNING, &priv->status))
446 return;
447
448 mutex_lock(&priv->mutex);
449
450 /* Regardless of if we are associated, we must reconfigure the
451 * TX power since frames can be sent on non-radar channels while
452 * not associated */
453 iwl4965_send_tx_power(priv);
454
455 /* Update last_temperature to keep is_calib_needed from running
456 * when it isn't needed... */
457 priv->last_temperature = priv->temperature;
458
459 mutex_unlock(&priv->mutex);
460}
461
462/*
463 * Acquire priv->lock before calling this function !
464 */
465static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
466{
467 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
468 (index & 0xff) | (txq_id << 8));
469 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
470}
471
472/**
473 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
474 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
475 * @scd_retry: (1) Indicates queue will be used in aggregation mode
476 *
477 * NOTE: Acquire priv->lock before calling this function !
478 */
479static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
480 struct iwl_tx_queue *txq,
481 int tx_fifo_id, int scd_retry)
482{
483 int txq_id = txq->q.id;
484
485 /* Find out whether to activate Tx queue */
486 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
487
488 /* Set up and activate */
489 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
490 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
491 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
492 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
493 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
494 IWL49_SCD_QUEUE_STTS_REG_MSK);
495
496 txq->sched_retry = scd_retry;
497
498 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
499 active ? "Activate" : "Deactivate",
500 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
501}
502
503static const s8 default_queue_to_tx_fifo[] = {
504 IWL_TX_FIFO_VO,
505 IWL_TX_FIFO_VI,
506 IWL_TX_FIFO_BE,
507 IWL_TX_FIFO_BK,
508 IWL49_CMD_FIFO_NUM,
509 IWL_TX_FIFO_UNUSED,
510 IWL_TX_FIFO_UNUSED,
511};
512
513static int iwl4965_alive_notify(struct iwl_priv *priv)
514{
515 u32 a;
516 unsigned long flags;
517 int i, chan;
518 u32 reg_val;
519
520 spin_lock_irqsave(&priv->lock, flags);
521
522 /* Clear 4965's internal Tx Scheduler data base */
523 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
524 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
525 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
526 iwl_write_targ_mem(priv, a, 0);
527 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
528 iwl_write_targ_mem(priv, a, 0);
529 for (; a < priv->scd_base_addr +
530 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
531 iwl_write_targ_mem(priv, a, 0);
532
533 /* Tel 4965 where to find Tx byte count tables */
534 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
535 priv->scd_bc_tbls.dma >> 10);
536
537 /* Enable DMA channel */
538 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
539 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
540 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
541 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
542
543 /* Update FH chicken bits */
544 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
545 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
546 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
547
548 /* Disable chain mode for all queues */
549 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
550
551 /* Initialize each Tx queue (including the command queue) */
552 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
553
554 /* TFD circular buffer read/write indexes */
555 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
556 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
557
558 /* Max Tx Window size for Scheduler-ACK mode */
559 iwl_write_targ_mem(priv, priv->scd_base_addr +
560 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
561 (SCD_WIN_SIZE <<
562 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
563 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
564
565 /* Frame limit */
566 iwl_write_targ_mem(priv, priv->scd_base_addr +
567 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
568 sizeof(u32),
569 (SCD_FRAME_LIMIT <<
570 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
571 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
572
573 }
574 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
575 (1 << priv->hw_params.max_txq_num) - 1);
576
577 /* Activate all Tx DMA/FIFO channels */
578 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
579
580 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
581
582 /* make sure all queue are not stopped */
583 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
584 for (i = 0; i < 4; i++)
585 atomic_set(&priv->queue_stop_count[i], 0);
586
587 /* reset to 0 to enable all the queue first */
588 priv->txq_ctx_active_msk = 0;
589 /* Map each Tx/cmd queue to its corresponding fifo */
590 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
591
592 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
593 int ac = default_queue_to_tx_fifo[i];
594
595 iwl_txq_ctx_activate(priv, i);
596
597 if (ac == IWL_TX_FIFO_UNUSED)
598 continue;
599
600 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
601 }
602
603 spin_unlock_irqrestore(&priv->lock, flags);
604
605 return 0;
606}
607
608static struct iwl_sensitivity_ranges iwl4965_sensitivity = { 354static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
609 .min_nrg_cck = 97, 355 .min_nrg_cck = 97,
610 .max_nrg_cck = 0, /* not used, set to 0 */ 356 .max_nrg_cck = 0, /* not used, set to 0 */
@@ -666,15 +412,15 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
666 412
667 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 413 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
668 414
669 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 415 priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
670 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); 416 priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
671 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 417 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
672 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 418 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
673 419
674 iwl4965_set_ct_threshold(priv); 420 iwl4965_set_ct_threshold(priv);
675 421
676 priv->hw_params.sens = &iwl4965_sensitivity; 422 priv->hw_params.sens = &iwl4965_sensitivity;
677 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 423 priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
678 424
679 return 0; 425 return 0;
680} 426}
@@ -1158,9 +904,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1158 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band, 904 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
1159 is_ht40); 905 is_ht40);
1160 906
1161 ch_info = iwl_get_channel_info(priv, priv->band, channel); 907 ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
1162 908
1163 if (!is_channel_valid(ch_info)) 909 if (!iwl_legacy_is_channel_valid(ch_info))
1164 return -EINVAL; 910 return -EINVAL;
1165 911
1166 /* get txatten group, used to select 1) thermal txpower adjustment 912 /* get txatten group, used to select 1) thermal txpower adjustment
@@ -1384,7 +1130,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1384 1130
1385 band = priv->band == IEEE80211_BAND_2GHZ; 1131 band = priv->band == IEEE80211_BAND_2GHZ;
1386 1132
1387 is_ht40 = is_ht40_channel(ctx->active.flags); 1133 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1388 1134
1389 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) 1135 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1390 ctrl_chan_high = 1; 1136 ctrl_chan_high = 1;
@@ -1398,7 +1144,8 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1398 if (ret) 1144 if (ret)
1399 goto out; 1145 goto out;
1400 1146
1401 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); 1147 ret = iwl_legacy_send_cmd_pdu(priv,
1148 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1402 1149
1403out: 1150out:
1404 return ret; 1151 return ret;
@@ -1409,8 +1156,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1409{ 1156{
1410 int ret = 0; 1157 int ret = 0;
1411 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1158 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1412 const struct iwl_rxon_cmd *rxon1 = &ctx->staging; 1159 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1413 const struct iwl_rxon_cmd *rxon2 = &ctx->active; 1160 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1414 1161
1415 if ((rxon1->flags == rxon2->flags) && 1162 if ((rxon1->flags == rxon2->flags) &&
1416 (rxon1->filter_flags == rxon2->filter_flags) && 1163 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1436,7 +1183,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1436 ctx->staging.ofdm_ht_dual_stream_basic_rates; 1183 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1437 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; 1184 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1438 1185
1439 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, 1186 ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1440 sizeof(rxon_assoc), &rxon_assoc, NULL); 1187 sizeof(rxon_assoc), &rxon_assoc, NULL);
1441 if (ret) 1188 if (ret)
1442 return ret; 1189 return ret;
@@ -1447,12 +1194,12 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1447static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1194static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1448{ 1195{
1449 /* cast away the const for active_rxon in this function */ 1196 /* cast away the const for active_rxon in this function */
1450 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active; 1197 struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
1451 int ret; 1198 int ret;
1452 bool new_assoc = 1199 bool new_assoc =
1453 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); 1200 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1454 1201
1455 if (!iwl_is_alive(priv)) 1202 if (!iwl_legacy_is_alive(priv))
1456 return -EBUSY; 1203 return -EBUSY;
1457 1204
1458 if (!ctx->is_active) 1205 if (!ctx->is_active)
@@ -1461,7 +1208,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1461 /* always get timestamp with Rx frame */ 1208 /* always get timestamp with Rx frame */
1462 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 1209 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1463 1210
1464 ret = iwl_check_rxon_cmd(priv, ctx); 1211 ret = iwl_legacy_check_rxon_cmd(priv, ctx);
1465 if (ret) { 1212 if (ret) {
1466 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 1213 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1467 return -EINVAL; 1214 return -EINVAL;
@@ -1475,21 +1222,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1475 (priv->switch_rxon.channel != ctx->staging.channel)) { 1222 (priv->switch_rxon.channel != ctx->staging.channel)) {
1476 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 1223 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1477 le16_to_cpu(priv->switch_rxon.channel)); 1224 le16_to_cpu(priv->switch_rxon.channel));
1478 iwl_chswitch_done(priv, false); 1225 iwl_legacy_chswitch_done(priv, false);
1479 } 1226 }
1480 1227
1481 /* If we don't need to send a full RXON, we can use 1228 /* If we don't need to send a full RXON, we can use
1482 * iwl_rxon_assoc_cmd which is used to reconfigure filter 1229 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1483 * and other flags for the current radio configuration. */ 1230 * and other flags for the current radio configuration. */
1484 if (!iwl_full_rxon_required(priv, ctx)) { 1231 if (!iwl_legacy_full_rxon_required(priv, ctx)) {
1485 ret = iwl_send_rxon_assoc(priv, ctx); 1232 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
1486 if (ret) { 1233 if (ret) {
1487 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); 1234 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1488 return ret; 1235 return ret;
1489 } 1236 }
1490 1237
1491 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1238 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1492 iwl_print_rx_config_cmd(priv, ctx); 1239 iwl_legacy_print_rx_config_cmd(priv, ctx);
1493 return 0; 1240 return 0;
1494 } 1241 }
1495 1242
@@ -1497,12 +1244,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1497 * an RXON_ASSOC and the new config wants the associated mask enabled, 1244 * an RXON_ASSOC and the new config wants the associated mask enabled,
1498 * we must clear the associated from the active configuration 1245 * we must clear the associated from the active configuration
1499 * before we apply the new config */ 1246 * before we apply the new config */
1500 if (iwl_is_associated_ctx(ctx) && new_assoc) { 1247 if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
1501 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 1248 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1502 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1249 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1503 1250
1504 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1251 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1505 sizeof(struct iwl_rxon_cmd), 1252 sizeof(struct iwl_legacy_rxon_cmd),
1506 active_rxon); 1253 active_rxon);
1507 1254
1508 /* If the mask clearing failed then we set 1255 /* If the mask clearing failed then we set
@@ -1512,9 +1259,9 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1512 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); 1259 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
1513 return ret; 1260 return ret;
1514 } 1261 }
1515 iwl_clear_ucode_stations(priv, ctx); 1262 iwl_legacy_clear_ucode_stations(priv, ctx);
1516 iwl_restore_stations(priv, ctx); 1263 iwl_legacy_restore_stations(priv, ctx);
1517 ret = iwl_restore_default_wep_keys(priv, ctx); 1264 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1518 if (ret) { 1265 if (ret) {
1519 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); 1266 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1520 return ret; 1267 return ret;
@@ -1529,24 +1276,25 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1529 le16_to_cpu(ctx->staging.channel), 1276 le16_to_cpu(ctx->staging.channel),
1530 ctx->staging.bssid_addr); 1277 ctx->staging.bssid_addr);
1531 1278
1532 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto); 1279 iwl_legacy_set_rxon_hwcrypto(priv, ctx,
1280 !priv->cfg->mod_params->sw_crypto);
1533 1281
1534 /* Apply the new configuration 1282 /* Apply the new configuration
1535 * RXON unassoc clears the station table in uCode so restoration of 1283 * RXON unassoc clears the station table in uCode so restoration of
1536 * stations is needed after it (the RXON command) completes 1284 * stations is needed after it (the RXON command) completes
1537 */ 1285 */
1538 if (!new_assoc) { 1286 if (!new_assoc) {
1539 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1287 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1540 sizeof(struct iwl_rxon_cmd), &ctx->staging); 1288 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1541 if (ret) { 1289 if (ret) {
1542 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 1290 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1543 return ret; 1291 return ret;
1544 } 1292 }
1545 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); 1293 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
1546 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1294 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1547 iwl_clear_ucode_stations(priv, ctx); 1295 iwl_legacy_clear_ucode_stations(priv, ctx);
1548 iwl_restore_stations(priv, ctx); 1296 iwl_legacy_restore_stations(priv, ctx);
1549 ret = iwl_restore_default_wep_keys(priv, ctx); 1297 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1550 if (ret) { 1298 if (ret) {
1551 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); 1299 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1552 return ret; 1300 return ret;
@@ -1557,21 +1305,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1557 /* Apply the new configuration 1305 /* Apply the new configuration
1558 * RXON assoc doesn't clear the station table in uCode, 1306 * RXON assoc doesn't clear the station table in uCode,
1559 */ 1307 */
1560 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1308 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1561 sizeof(struct iwl_rxon_cmd), &ctx->staging); 1309 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1562 if (ret) { 1310 if (ret) {
1563 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 1311 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1564 return ret; 1312 return ret;
1565 } 1313 }
1566 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1314 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1567 } 1315 }
1568 iwl_print_rx_config_cmd(priv, ctx); 1316 iwl_legacy_print_rx_config_cmd(priv, ctx);
1569 1317
1570 iwl_init_sensitivity(priv); 1318 iwl4965_init_sensitivity(priv);
1571 1319
1572 /* If we issue a new RXON command which required a tune then we must 1320 /* If we issue a new RXON command which required a tune then we must
1573 * send a new TXPOWER command or we won't be able to Tx any frames */ 1321 * send a new TXPOWER command or we won't be able to Tx any frames */
1574 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 1322 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1575 if (ret) { 1323 if (ret) {
1576 IWL_ERR(priv, "Error sending TX power (%d)\n", ret); 1324 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1577 return ret; 1325 return ret;
@@ -1598,7 +1346,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1598 struct ieee80211_vif *vif = ctx->vif; 1346 struct ieee80211_vif *vif = ctx->vif;
1599 band = priv->band == IEEE80211_BAND_2GHZ; 1347 band = priv->band == IEEE80211_BAND_2GHZ;
1600 1348
1601 is_ht40 = is_ht40_channel(ctx->staging.flags); 1349 is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
1602 1350
1603 if (is_ht40 && 1351 if (is_ht40 &&
1604 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) 1352 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
@@ -1629,19 +1377,19 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1629 else { 1377 else {
1630 switch_time_in_usec = 1378 switch_time_in_usec =
1631 vif->bss_conf.beacon_int * switch_count * TIME_UNIT; 1379 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1632 ucode_switch_time = iwl_usecs_to_beacons(priv, 1380 ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
1633 switch_time_in_usec, 1381 switch_time_in_usec,
1634 beacon_interval); 1382 beacon_interval);
1635 cmd.switch_time = iwl_add_beacon_time(priv, 1383 cmd.switch_time = iwl_legacy_add_beacon_time(priv,
1636 priv->ucode_beacon_time, 1384 priv->ucode_beacon_time,
1637 ucode_switch_time, 1385 ucode_switch_time,
1638 beacon_interval); 1386 beacon_interval);
1639 } 1387 }
1640 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 1388 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
1641 cmd.switch_time); 1389 cmd.switch_time);
1642 ch_info = iwl_get_channel_info(priv, priv->band, ch); 1390 ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
1643 if (ch_info) 1391 if (ch_info)
1644 cmd.expect_beacon = is_channel_radar(ch_info); 1392 cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
1645 else { 1393 else {
1646 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 1394 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1647 ctx->active.channel, ch); 1395 ctx->active.channel, ch);
@@ -1658,7 +1406,8 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1658 priv->switch_rxon.channel = cmd.channel; 1406 priv->switch_rxon.channel = cmd.channel;
1659 priv->switch_rxon.switch_in_progress = true; 1407 priv->switch_rxon.switch_in_progress = true;
1660 1408
1661 return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1409 return iwl_legacy_send_cmd_pdu(priv,
1410 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1662} 1411}
1663 1412
1664/** 1413/**
@@ -1700,7 +1449,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1700 u32 R4; 1449 u32 R4;
1701 1450
1702 if (test_bit(STATUS_TEMPERATURE, &priv->status) && 1451 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1703 (priv->_agn.statistics.flag & 1452 (priv->_4965.statistics.flag &
1704 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { 1453 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1705 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); 1454 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1706 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); 1455 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
@@ -1725,7 +1474,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1725 if (!test_bit(STATUS_TEMPERATURE, &priv->status)) 1474 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1726 vt = sign_extend32(R4, 23); 1475 vt = sign_extend32(R4, 23);
1727 else 1476 else
1728 vt = sign_extend32(le32_to_cpu(priv->_agn.statistics. 1477 vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
1729 general.common.temperature), 23); 1478 general.common.temperature), 23);
1730 1479
1731 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); 1480 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
@@ -1810,7 +1559,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1810 } 1559 }
1811 1560
1812 priv->temperature = temp; 1561 priv->temperature = temp;
1813 iwl_tt_handler(priv);
1814 set_bit(STATUS_TEMPERATURE, &priv->status); 1562 set_bit(STATUS_TEMPERATURE, &priv->status);
1815 1563
1816 if (!priv->disable_tx_power_cal && 1564 if (!priv->disable_tx_power_cal &&
@@ -1819,152 +1567,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1819 queue_work(priv->workqueue, &priv->txpower_work); 1567 queue_work(priv->workqueue, &priv->txpower_work);
1820} 1568}
1821 1569
1822/**
1823 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
1824 */
1825static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
1826 u16 txq_id)
1827{
1828 /* Simply stop the queue, but don't change any configuration;
1829 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
1830 iwl_write_prph(priv,
1831 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
1832 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1833 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1834}
1835
1836/**
1837 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
1838 * priv->lock must be held by the caller
1839 */
1840static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1841 u16 ssn_idx, u8 tx_fifo)
1842{
1843 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1844 (IWL49_FIRST_AMPDU_QUEUE +
1845 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
1846 IWL_WARN(priv,
1847 "queue number out of range: %d, must be %d to %d\n",
1848 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1849 IWL49_FIRST_AMPDU_QUEUE +
1850 priv->cfg->base_params->num_of_ampdu_queues - 1);
1851 return -EINVAL;
1852 }
1853
1854 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1855
1856 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
1857
1858 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1859 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1860 /* supposes that ssn_idx is valid (!= 0xFFF) */
1861 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1862
1863 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
1864 iwl_txq_ctx_deactivate(priv, txq_id);
1865 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
1866
1867 return 0;
1868}
1869
1870/**
1871 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
1872 */
1873static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
1874 u16 txq_id)
1875{
1876 u32 tbl_dw_addr;
1877 u32 tbl_dw;
1878 u16 scd_q2ratid;
1879
1880 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1881
1882 tbl_dw_addr = priv->scd_base_addr +
1883 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
1884
1885 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
1886
1887 if (txq_id & 0x1)
1888 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1889 else
1890 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1891
1892 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
1893
1894 return 0;
1895}
1896
1897
1898/**
1899 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
1900 *
1901 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
1902 * i.e. it must be one of the higher queues used for aggregation
1903 */
1904static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1905 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
1906{
1907 unsigned long flags;
1908 u16 ra_tid;
1909 int ret;
1910
1911 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1912 (IWL49_FIRST_AMPDU_QUEUE +
1913 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
1914 IWL_WARN(priv,
1915 "queue number out of range: %d, must be %d to %d\n",
1916 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1917 IWL49_FIRST_AMPDU_QUEUE +
1918 priv->cfg->base_params->num_of_ampdu_queues - 1);
1919 return -EINVAL;
1920 }
1921
1922 ra_tid = BUILD_RAxTID(sta_id, tid);
1923
1924 /* Modify device's station table to Tx this TID */
1925 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
1926 if (ret)
1927 return ret;
1928
1929 spin_lock_irqsave(&priv->lock, flags);
1930
1931 /* Stop this Tx queue before configuring it */
1932 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1933
1934 /* Map receiver-address / traffic-ID to this queue */
1935 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
1936
1937 /* Set this queue as a chain-building queue */
1938 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
1939
1940 /* Place first TFD at index corresponding to start sequence number.
1941 * Assumes that ssn_idx is valid (!= 0xFFF) */
1942 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1943 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1944 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1945
1946 /* Set up Tx window size and frame limit for this queue */
1947 iwl_write_targ_mem(priv,
1948 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
1949 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1950 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1951
1952 iwl_write_targ_mem(priv, priv->scd_base_addr +
1953 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1954 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
1955 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1956
1957 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
1958
1959 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
1960 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
1961
1962 spin_unlock_irqrestore(&priv->lock, flags);
1963
1964 return 0;
1965}
1966
1967
1968static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) 1570static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1969{ 1571{
1970 switch (cmd_id) { 1572 switch (cmd_id) {
@@ -1975,7 +1577,8 @@ static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1975 } 1577 }
1976} 1578}
1977 1579
1978static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) 1580static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
1581 u8 *data)
1979{ 1582{
1980 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data; 1583 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1981 addsta->mode = cmd->mode; 1584 addsta->mode = cmd->mode;
@@ -2028,16 +1631,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2028 status = le16_to_cpu(frame_status[0].status); 1631 status = le16_to_cpu(frame_status[0].status);
2029 idx = start_idx; 1632 idx = start_idx;
2030 1633
2031 /* FIXME: code repetition */
2032 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 1634 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
2033 agg->frame_count, agg->start_idx, idx); 1635 agg->frame_count, agg->start_idx, idx);
2034 1636
2035 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); 1637 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
2036 info->status.rates[0].count = tx_resp->failure_frame + 1; 1638 info->status.rates[0].count = tx_resp->failure_frame + 1;
2037 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1639 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
2038 info->flags |= iwl_tx_status_to_mac80211(status); 1640 info->flags |= iwl4965_tx_status_to_mac80211(status);
2039 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info); 1641 iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
2040 /* FIXME: code repetition end */
2041 1642
2042 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", 1643 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
2043 status & 0xff, tx_resp->failure_frame); 1644 status & 0xff, tx_resp->failure_frame);
@@ -2064,7 +1665,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2064 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", 1665 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
2065 agg->frame_count, txq_id, idx); 1666 agg->frame_count, txq_id, idx);
2066 1667
2067 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 1668 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
2068 if (!hdr) { 1669 if (!hdr) {
2069 IWL_ERR(priv, 1670 IWL_ERR(priv,
2070 "BUG_ON idx doesn't point to valid skb" 1671 "BUG_ON idx doesn't point to valid skb"
@@ -2115,15 +1716,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2115 return 0; 1716 return 0;
2116} 1717}
2117 1718
2118static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr) 1719static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
2119{ 1720{
2120 int i; 1721 int i;
2121 int start = 0; 1722 int start = 0;
2122 int ret = IWL_INVALID_STATION; 1723 int ret = IWL_INVALID_STATION;
2123 unsigned long flags; 1724 unsigned long flags;
2124 1725
2125 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) || 1726 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
2126 (priv->iw_mode == NL80211_IFTYPE_AP))
2127 start = IWL_STA_ID; 1727 start = IWL_STA_ID;
2128 1728
2129 if (is_broadcast_ether_addr(addr)) 1729 if (is_broadcast_ether_addr(addr))
@@ -2159,13 +1759,13 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
2159 return ret; 1759 return ret;
2160} 1760}
2161 1761
2162static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) 1762static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2163{ 1763{
2164 if (priv->iw_mode == NL80211_IFTYPE_STATION) { 1764 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
2165 return IWL_AP_ID; 1765 return IWL_AP_ID;
2166 } else { 1766 } else {
2167 u8 *da = ieee80211_get_DA(hdr); 1767 u8 *da = ieee80211_get_DA(hdr);
2168 return iwl_find_station(priv, da); 1768 return iwl4965_find_station(priv, da);
2169 } 1769 }
2170} 1770}
2171 1771
@@ -2190,7 +1790,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2190 u8 *qc = NULL; 1790 u8 *qc = NULL;
2191 unsigned long flags; 1791 unsigned long flags;
2192 1792
2193 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 1793 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
2194 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 1794 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
2195 "is out of range [0-%d] %d %d\n", txq_id, 1795 "is out of range [0-%d] %d %d\n", txq_id,
2196 index, txq->q.n_bd, txq->q.write_ptr, 1796 index, txq->q.n_bd, txq->q.write_ptr,
@@ -2202,13 +1802,13 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2202 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 1802 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
2203 memset(&info->status, 0, sizeof(info->status)); 1803 memset(&info->status, 0, sizeof(info->status));
2204 1804
2205 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); 1805 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
2206 if (ieee80211_is_data_qos(hdr->frame_control)) { 1806 if (ieee80211_is_data_qos(hdr->frame_control)) {
2207 qc = ieee80211_get_qos_ctl(hdr); 1807 qc = ieee80211_get_qos_ctl(hdr);
2208 tid = qc[0] & 0xf; 1808 tid = qc[0] & 0xf;
2209 } 1809 }
2210 1810
2211 sta_id = iwl_get_ra_sta_id(priv, hdr); 1811 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
2212 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { 1812 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2213 IWL_ERR(priv, "Station not known\n"); 1813 IWL_ERR(priv, "Station not known\n");
2214 return; 1814 return;
@@ -2225,114 +1825,89 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2225 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); 1825 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
2226 1826
2227 /* check if BAR is needed */ 1827 /* check if BAR is needed */
2228 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) 1828 if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
2229 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1829 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
2230 1830
2231 if (txq->q.read_ptr != (scd_ssn & 0xff)) { 1831 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2232 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 1832 index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
1833 txq->q.n_bd);
2233 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 1834 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2234 "%d index %d\n", scd_ssn , index); 1835 "%d index %d\n", scd_ssn , index);
2235 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 1836 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2236 if (qc) 1837 if (qc)
2237 iwl_free_tfds_in_queue(priv, sta_id, 1838 iwl4965_free_tfds_in_queue(priv, sta_id,
2238 tid, freed); 1839 tid, freed);
2239 1840
2240 if (priv->mac80211_registered && 1841 if (priv->mac80211_registered &&
2241 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 1842 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
2242 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) 1843 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
2243 iwl_wake_queue(priv, txq); 1844 iwl_legacy_wake_queue(priv, txq);
2244 } 1845 }
2245 } else { 1846 } else {
2246 info->status.rates[0].count = tx_resp->failure_frame + 1; 1847 info->status.rates[0].count = tx_resp->failure_frame + 1;
2247 info->flags |= iwl_tx_status_to_mac80211(status); 1848 info->flags |= iwl4965_tx_status_to_mac80211(status);
2248 iwlagn_hwrate_to_tx_control(priv, 1849 iwl4965_hwrate_to_tx_control(priv,
2249 le32_to_cpu(tx_resp->rate_n_flags), 1850 le32_to_cpu(tx_resp->rate_n_flags),
2250 info); 1851 info);
2251 1852
2252 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " 1853 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
2253 "rate_n_flags 0x%x retries %d\n", 1854 "rate_n_flags 0x%x retries %d\n",
2254 txq_id, 1855 txq_id,
2255 iwl_get_tx_fail_reason(status), status, 1856 iwl4965_get_tx_fail_reason(status), status,
2256 le32_to_cpu(tx_resp->rate_n_flags), 1857 le32_to_cpu(tx_resp->rate_n_flags),
2257 tx_resp->failure_frame); 1858 tx_resp->failure_frame);
2258 1859
2259 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 1860 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2260 if (qc && likely(sta_id != IWL_INVALID_STATION)) 1861 if (qc && likely(sta_id != IWL_INVALID_STATION))
2261 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 1862 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
2262 else if (sta_id == IWL_INVALID_STATION) 1863 else if (sta_id == IWL_INVALID_STATION)
2263 IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); 1864 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
2264 1865
2265 if (priv->mac80211_registered && 1866 if (priv->mac80211_registered &&
2266 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 1867 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
2267 iwl_wake_queue(priv, txq); 1868 iwl_legacy_wake_queue(priv, txq);
2268 } 1869 }
2269 if (qc && likely(sta_id != IWL_INVALID_STATION)) 1870 if (qc && likely(sta_id != IWL_INVALID_STATION))
2270 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 1871 iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
2271 1872
2272 iwl_check_abort_status(priv, tx_resp->frame_count, status); 1873 iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
2273 1874
2274 spin_unlock_irqrestore(&priv->sta_lock, flags); 1875 spin_unlock_irqrestore(&priv->sta_lock, flags);
2275} 1876}
2276 1877
2277static int iwl4965_calc_rssi(struct iwl_priv *priv, 1878static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
2278 struct iwl_rx_phy_res *rx_resp) 1879 struct iwl_rx_mem_buffer *rxb)
2279{ 1880{
2280 /* data from PHY/DSP regarding signal strength, etc., 1881 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2281 * contents are always there, not configurable by host. */ 1882 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
2282 struct iwl4965_rx_non_cfg_phy *ncphy = 1883 u8 rate __maybe_unused =
2283 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; 1884 iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
2284 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) 1885
2285 >> IWL49_AGC_DB_POS; 1886 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
2286 1887 "tsf:0x%.8x%.8x rate:%d\n",
2287 u32 valid_antennae = 1888 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
2288 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK) 1889 beacon->beacon_notify_hdr.failure_frame,
2289 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; 1890 le32_to_cpu(beacon->ibss_mgr_status),
2290 u8 max_rssi = 0; 1891 le32_to_cpu(beacon->high_tsf),
2291 u32 i; 1892 le32_to_cpu(beacon->low_tsf), rate);
2292 1893
2293 /* Find max rssi among 3 possible receivers. 1894 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
2294 * These values are measured by the digital signal processor (DSP).
2295 * They should stay fairly constant even as the signal strength varies,
2296 * if the radio's automatic gain control (AGC) is working right.
2297 * AGC value (see below) will provide the "interesting" info. */
2298 for (i = 0; i < 3; i++)
2299 if (valid_antennae & (1 << i))
2300 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2301
2302 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2303 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2304 max_rssi, agc);
2305
2306 /* dBm = max_rssi dB - agc dB - constant.
2307 * Higher AGC (higher radio gain) means lower signal. */
2308 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
2309} 1895}
2310 1896
2311
2312/* Set up 4965-specific Rx frame reply handlers */ 1897/* Set up 4965-specific Rx frame reply handlers */
2313static void iwl4965_rx_handler_setup(struct iwl_priv *priv) 1898static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
2314{ 1899{
2315 /* Legacy Rx frames */ 1900 /* Legacy Rx frames */
2316 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx; 1901 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
2317 /* Tx response */ 1902 /* Tx response */
2318 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; 1903 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
2319} 1904 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
2320
2321static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
2322{
2323 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
2324}
2325
2326static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
2327{
2328 cancel_work_sync(&priv->txpower_work);
2329} 1905}
2330 1906
2331static struct iwl_hcmd_ops iwl4965_hcmd = { 1907static struct iwl_hcmd_ops iwl4965_hcmd = {
2332 .rxon_assoc = iwl4965_send_rxon_assoc, 1908 .rxon_assoc = iwl4965_send_rxon_assoc,
2333 .commit_rxon = iwl4965_commit_rxon, 1909 .commit_rxon = iwl4965_commit_rxon,
2334 .set_rxon_chain = iwlagn_set_rxon_chain, 1910 .set_rxon_chain = iwl4965_set_rxon_chain,
2335 .send_bt_config = iwl_send_bt_config,
2336}; 1911};
2337 1912
2338static void iwl4965_post_scan(struct iwl_priv *priv) 1913static void iwl4965_post_scan(struct iwl_priv *priv)
@@ -2344,7 +1919,7 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
2344 * performing the scan, fire one off if needed 1919 * performing the scan, fire one off if needed
2345 */ 1920 */
2346 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) 1921 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2347 iwlcore_commit_rxon(priv, ctx); 1922 iwl_legacy_commit_rxon(priv, ctx);
2348} 1923}
2349 1924
2350static void iwl4965_post_associate(struct iwl_priv *priv) 1925static void iwl4965_post_associate(struct iwl_priv *priv)
@@ -2357,29 +1932,24 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2357 if (!vif || !priv->is_open) 1932 if (!vif || !priv->is_open)
2358 return; 1933 return;
2359 1934
2360 if (vif->type == NL80211_IFTYPE_AP) {
2361 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2362 return;
2363 }
2364
2365 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1935 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2366 return; 1936 return;
2367 1937
2368 iwl_scan_cancel_timeout(priv, 200); 1938 iwl_legacy_scan_cancel_timeout(priv, 200);
2369 1939
2370 conf = ieee80211_get_hw_conf(priv->hw); 1940 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
2371 1941
2372 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1942 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2373 iwlcore_commit_rxon(priv, ctx); 1943 iwl_legacy_commit_rxon(priv, ctx);
2374 1944
2375 ret = iwl_send_rxon_timing(priv, ctx); 1945 ret = iwl_legacy_send_rxon_timing(priv, ctx);
2376 if (ret) 1946 if (ret)
2377 IWL_WARN(priv, "RXON timing - " 1947 IWL_WARN(priv, "RXON timing - "
2378 "Attempting to continue.\n"); 1948 "Attempting to continue.\n");
2379 1949
2380 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 1950 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2381 1951
2382 iwl_set_rxon_ht(priv, &priv->current_ht_config); 1952 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
2383 1953
2384 if (priv->cfg->ops->hcmd->set_rxon_chain) 1954 if (priv->cfg->ops->hcmd->set_rxon_chain)
2385 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 1955 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
@@ -2401,7 +1971,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2401 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 1971 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2402 } 1972 }
2403 1973
2404 iwlcore_commit_rxon(priv, ctx); 1974 iwl_legacy_commit_rxon(priv, ctx);
2405 1975
2406 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 1976 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2407 vif->bss_conf.aid, ctx->active.bssid_addr); 1977 vif->bss_conf.aid, ctx->active.bssid_addr);
@@ -2410,7 +1980,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2410 case NL80211_IFTYPE_STATION: 1980 case NL80211_IFTYPE_STATION:
2411 break; 1981 break;
2412 case NL80211_IFTYPE_ADHOC: 1982 case NL80211_IFTYPE_ADHOC:
2413 iwlagn_send_beacon_cmd(priv); 1983 iwl4965_send_beacon_cmd(priv);
2414 break; 1984 break;
2415 default: 1985 default:
2416 IWL_ERR(priv, "%s Should not be called in %d mode\n", 1986 IWL_ERR(priv, "%s Should not be called in %d mode\n",
@@ -2422,10 +1992,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2422 * If chain noise has already been run, then we need to enable 1992 * If chain noise has already been run, then we need to enable
2423 * power management here */ 1993 * power management here */
2424 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) 1994 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
2425 iwl_power_update_mode(priv, false); 1995 iwl_legacy_power_update_mode(priv, false);
2426 1996
2427 /* Enable Rx differential gain and sensitivity calibrations */ 1997 /* Enable Rx differential gain and sensitivity calibrations */
2428 iwl_chain_noise_reset(priv); 1998 iwl4965_chain_noise_reset(priv);
2429 priv->start_calib = 1; 1999 priv->start_calib = 1;
2430} 2000}
2431 2001
@@ -2441,14 +2011,14 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2441 return; 2011 return;
2442 2012
2443 /* The following should be done only at AP bring up */ 2013 /* The following should be done only at AP bring up */
2444 if (!iwl_is_associated_ctx(ctx)) { 2014 if (!iwl_legacy_is_associated_ctx(ctx)) {
2445 2015
2446 /* RXON - unassoc (to set timing command) */ 2016 /* RXON - unassoc (to set timing command) */
2447 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2017 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2448 iwlcore_commit_rxon(priv, ctx); 2018 iwl_legacy_commit_rxon(priv, ctx);
2449 2019
2450 /* RXON Timing */ 2020 /* RXON Timing */
2451 ret = iwl_send_rxon_timing(priv, ctx); 2021 ret = iwl_legacy_send_rxon_timing(priv, ctx);
2452 if (ret) 2022 if (ret)
2453 IWL_WARN(priv, "RXON timing failed - " 2023 IWL_WARN(priv, "RXON timing failed - "
2454 "Attempting to continue.\n"); 2024 "Attempting to continue.\n");
@@ -2456,7 +2026,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2456 /* AP has all antennas */ 2026 /* AP has all antennas */
2457 priv->chain_noise_data.active_chains = 2027 priv->chain_noise_data.active_chains =
2458 priv->hw_params.valid_rx_ant; 2028 priv->hw_params.valid_rx_ant;
2459 iwl_set_rxon_ht(priv, &priv->current_ht_config); 2029 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
2460 if (priv->cfg->ops->hcmd->set_rxon_chain) 2030 if (priv->cfg->ops->hcmd->set_rxon_chain)
2461 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 2031 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2462 2032
@@ -2478,51 +2048,37 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2478 ~RXON_FLG_SHORT_SLOT_MSK; 2048 ~RXON_FLG_SHORT_SLOT_MSK;
2479 } 2049 }
2480 /* need to send beacon cmd before committing assoc RXON! */ 2050 /* need to send beacon cmd before committing assoc RXON! */
2481 iwlagn_send_beacon_cmd(priv); 2051 iwl4965_send_beacon_cmd(priv);
2482 /* restore RXON assoc */ 2052 /* restore RXON assoc */
2483 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2053 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2484 iwlcore_commit_rxon(priv, ctx); 2054 iwl_legacy_commit_rxon(priv, ctx);
2485 } 2055 }
2486 iwlagn_send_beacon_cmd(priv); 2056 iwl4965_send_beacon_cmd(priv);
2487
2488 /* FIXME - we need to add code here to detect a totally new
2489 * configuration, reset the AP, unassoc, rxon timing, assoc,
2490 * clear sta table, add BCAST sta... */
2491} 2057}
2492 2058
2493static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2059static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2494 .get_hcmd_size = iwl4965_get_hcmd_size, 2060 .get_hcmd_size = iwl4965_get_hcmd_size,
2495 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2061 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2496 .chain_noise_reset = iwl4965_chain_noise_reset, 2062 .request_scan = iwl4965_request_scan,
2497 .gain_computation = iwl4965_gain_computation,
2498 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2499 .calc_rssi = iwl4965_calc_rssi,
2500 .request_scan = iwlagn_request_scan,
2501 .post_scan = iwl4965_post_scan, 2063 .post_scan = iwl4965_post_scan,
2502}; 2064};
2503 2065
2504static struct iwl_lib_ops iwl4965_lib = { 2066static struct iwl_lib_ops iwl4965_lib = {
2505 .set_hw_params = iwl4965_hw_set_hw_params, 2067 .set_hw_params = iwl4965_hw_set_hw_params,
2506 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, 2068 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
2507 .txq_set_sched = iwl4965_txq_set_sched, 2069 .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
2508 .txq_agg_enable = iwl4965_txq_agg_enable, 2070 .txq_free_tfd = iwl4965_hw_txq_free_tfd,
2509 .txq_agg_disable = iwl4965_txq_agg_disable, 2071 .txq_init = iwl4965_hw_tx_queue_init,
2510 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
2511 .txq_free_tfd = iwl_hw_txq_free_tfd,
2512 .txq_init = iwl_hw_tx_queue_init,
2513 .rx_handler_setup = iwl4965_rx_handler_setup, 2072 .rx_handler_setup = iwl4965_rx_handler_setup,
2514 .setup_deferred_work = iwl4965_setup_deferred_work,
2515 .cancel_deferred_work = iwl4965_cancel_deferred_work,
2516 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, 2073 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2517 .alive_notify = iwl4965_alive_notify,
2518 .init_alive_start = iwl4965_init_alive_start, 2074 .init_alive_start = iwl4965_init_alive_start,
2519 .load_ucode = iwl4965_load_bsm, 2075 .load_ucode = iwl4965_load_bsm,
2520 .dump_nic_event_log = iwl_dump_nic_event_log, 2076 .dump_nic_event_log = iwl4965_dump_nic_event_log,
2521 .dump_nic_error_log = iwl_dump_nic_error_log, 2077 .dump_nic_error_log = iwl4965_dump_nic_error_log,
2522 .dump_fh = iwl_dump_fh, 2078 .dump_fh = iwl4965_dump_fh,
2523 .set_channel_switch = iwl4965_hw_channel_switch, 2079 .set_channel_switch = iwl4965_hw_channel_switch,
2524 .apm_ops = { 2080 .apm_ops = {
2525 .init = iwl_apm_init, 2081 .init = iwl_legacy_apm_init,
2526 .config = iwl4965_nic_config, 2082 .config = iwl4965_nic_config,
2527 }, 2083 },
2528 .eeprom_ops = { 2084 .eeprom_ops = {
@@ -2535,64 +2091,56 @@ static struct iwl_lib_ops iwl4965_lib = {
2535 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, 2091 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
2536 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS 2092 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
2537 }, 2093 },
2538 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 2094 .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
2539 .release_semaphore = iwlcore_eeprom_release_semaphore, 2095 .release_semaphore = iwl4965_eeprom_release_semaphore,
2540 .calib_version = iwl4965_eeprom_calib_version,
2541 .query_addr = iwlcore_eeprom_query_addr,
2542 }, 2096 },
2543 .send_tx_power = iwl4965_send_tx_power, 2097 .send_tx_power = iwl4965_send_tx_power,
2544 .update_chain_flags = iwl_update_chain_flags, 2098 .update_chain_flags = iwl4965_update_chain_flags,
2545 .isr_ops = {
2546 .isr = iwl_isr_legacy,
2547 },
2548 .temp_ops = { 2099 .temp_ops = {
2549 .temperature = iwl4965_temperature_calib, 2100 .temperature = iwl4965_temperature_calib,
2550 }, 2101 },
2551 .debugfs_ops = { 2102 .debugfs_ops = {
2552 .rx_stats_read = iwl_ucode_rx_stats_read, 2103 .rx_stats_read = iwl4965_ucode_rx_stats_read,
2553 .tx_stats_read = iwl_ucode_tx_stats_read, 2104 .tx_stats_read = iwl4965_ucode_tx_stats_read,
2554 .general_stats_read = iwl_ucode_general_stats_read, 2105 .general_stats_read = iwl4965_ucode_general_stats_read,
2555 .bt_stats_read = iwl_ucode_bt_stats_read,
2556 .reply_tx_error = iwl_reply_tx_error_read,
2557 }, 2106 },
2558 .check_plcp_health = iwl_good_plcp_health, 2107 .check_plcp_health = iwl4965_good_plcp_health,
2559}; 2108};
2560 2109
2561static const struct iwl_legacy_ops iwl4965_legacy_ops = { 2110static const struct iwl_legacy_ops iwl4965_legacy_ops = {
2562 .post_associate = iwl4965_post_associate, 2111 .post_associate = iwl4965_post_associate,
2563 .config_ap = iwl4965_config_ap, 2112 .config_ap = iwl4965_config_ap,
2564 .manage_ibss_station = iwlagn_manage_ibss_station, 2113 .manage_ibss_station = iwl4965_manage_ibss_station,
2565 .update_bcast_stations = iwl_update_bcast_stations, 2114 .update_bcast_stations = iwl4965_update_bcast_stations,
2566}; 2115};
2567 2116
2568struct ieee80211_ops iwl4965_hw_ops = { 2117struct ieee80211_ops iwl4965_hw_ops = {
2569 .tx = iwlagn_mac_tx, 2118 .tx = iwl4965_mac_tx,
2570 .start = iwlagn_mac_start, 2119 .start = iwl4965_mac_start,
2571 .stop = iwlagn_mac_stop, 2120 .stop = iwl4965_mac_stop,
2572 .add_interface = iwl_mac_add_interface, 2121 .add_interface = iwl_legacy_mac_add_interface,
2573 .remove_interface = iwl_mac_remove_interface, 2122 .remove_interface = iwl_legacy_mac_remove_interface,
2574 .change_interface = iwl_mac_change_interface, 2123 .change_interface = iwl_legacy_mac_change_interface,
2575 .config = iwl_legacy_mac_config, 2124 .config = iwl_legacy_mac_config,
2576 .configure_filter = iwlagn_configure_filter, 2125 .configure_filter = iwl4965_configure_filter,
2577 .set_key = iwlagn_mac_set_key, 2126 .set_key = iwl4965_mac_set_key,
2578 .update_tkip_key = iwlagn_mac_update_tkip_key, 2127 .update_tkip_key = iwl4965_mac_update_tkip_key,
2579 .conf_tx = iwl_mac_conf_tx, 2128 .conf_tx = iwl_legacy_mac_conf_tx,
2580 .reset_tsf = iwl_legacy_mac_reset_tsf, 2129 .reset_tsf = iwl_legacy_mac_reset_tsf,
2581 .bss_info_changed = iwl_legacy_mac_bss_info_changed, 2130 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
2582 .ampdu_action = iwlagn_mac_ampdu_action, 2131 .ampdu_action = iwl4965_mac_ampdu_action,
2583 .hw_scan = iwl_mac_hw_scan, 2132 .hw_scan = iwl_legacy_mac_hw_scan,
2584 .sta_add = iwlagn_mac_sta_add, 2133 .sta_add = iwl4965_mac_sta_add,
2585 .sta_remove = iwl_mac_sta_remove, 2134 .sta_remove = iwl_legacy_mac_sta_remove,
2586 .channel_switch = iwlagn_mac_channel_switch, 2135 .channel_switch = iwl4965_mac_channel_switch,
2587 .flush = iwlagn_mac_flush, 2136 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
2588 .tx_last_beacon = iwl_mac_tx_last_beacon,
2589}; 2137};
2590 2138
2591static const struct iwl_ops iwl4965_ops = { 2139static const struct iwl_ops iwl4965_ops = {
2592 .lib = &iwl4965_lib, 2140 .lib = &iwl4965_lib,
2593 .hcmd = &iwl4965_hcmd, 2141 .hcmd = &iwl4965_hcmd,
2594 .utils = &iwl4965_hcmd_utils, 2142 .utils = &iwl4965_hcmd_utils,
2595 .led = &iwlagn_led_ops, 2143 .led = &iwl4965_led_ops,
2596 .legacy = &iwl4965_legacy_ops, 2144 .legacy = &iwl4965_legacy_ops,
2597 .ieee80211_ops = &iwl4965_hw_ops, 2145 .ieee80211_ops = &iwl4965_hw_ops,
2598}; 2146};
@@ -2604,22 +2152,18 @@ static struct iwl_base_params iwl4965_base_params = {
2604 .pll_cfg_val = 0, 2152 .pll_cfg_val = 0,
2605 .set_l0s = true, 2153 .set_l0s = true,
2606 .use_bsm = true, 2154 .use_bsm = true,
2607 .use_isr_legacy = true,
2608 .broken_powersave = true,
2609 .led_compensation = 61, 2155 .led_compensation = 61,
2610 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2156 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2611 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2157 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2612 .wd_timeout = IWL_DEF_WD_TIMEOUT, 2158 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2613 .temperature_kelvin = true, 2159 .temperature_kelvin = true,
2614 .max_event_log_size = 512, 2160 .max_event_log_size = 512,
2615 .tx_power_by_driver = true,
2616 .ucode_tracing = true, 2161 .ucode_tracing = true,
2617 .sensitivity_calib_by_driver = true, 2162 .sensitivity_calib_by_driver = true,
2618 .chain_noise_calib_by_driver = true, 2163 .chain_noise_calib_by_driver = true,
2619 .no_agg_framecnt_info = true,
2620}; 2164};
2621 2165
2622struct iwl_cfg iwl4965_agn_cfg = { 2166struct iwl_cfg iwl4965_cfg = {
2623 .name = "Intel(R) Wireless WiFi Link 4965AGN", 2167 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2624 .fw_name_pre = IWL4965_FW_PRE, 2168 .fw_name_pre = IWL4965_FW_PRE,
2625 .ucode_api_max = IWL4965_UCODE_API_MAX, 2169 .ucode_api_max = IWL4965_UCODE_API_MAX,
@@ -2630,7 +2174,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2630 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2174 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2631 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, 2175 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2632 .ops = &iwl4965_ops, 2176 .ops = &iwl4965_ops,
2633 .mod_params = &iwlagn_mod_params, 2177 .mod_params = &iwl4965_mod_params,
2634 .base_params = &iwl4965_base_params, 2178 .base_params = &iwl4965_base_params,
2635 .led_mode = IWL_LED_BLINK, 2179 .led_mode = IWL_LED_BLINK,
2636 /* 2180 /*
@@ -2642,4 +2186,3 @@ struct iwl_cfg iwl4965_agn_cfg = {
2642 2186
2643/* Module firmware */ 2187/* Module firmware */
2644MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); 2188MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
2645
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644
index 000000000000..01f8163daf16
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.h
@@ -0,0 +1,282 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_4965_h__
64#define __iwl_4965_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _4965 devices */
69extern struct iwl_cfg iwl4965_cfg;
70
71extern struct iwl_mod_params iwl4965_mod_params;
72
73extern struct ieee80211_ops iwl4965_hw_ops;
74
75/* tx queue */
76void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
77 int sta_id, int tid, int freed);
78
79/* RXON */
80void iwl4965_set_rxon_chain(struct iwl_priv *priv,
81 struct iwl_rxon_context *ctx);
82
83/* uCode */
84int iwl4965_verify_ucode(struct iwl_priv *priv);
85
86/* lib */
87void iwl4965_check_abort_status(struct iwl_priv *priv,
88 u8 frame_count, u32 status);
89
90void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
91int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
92int iwl4965_hw_nic_init(struct iwl_priv *priv);
93int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
94
95/* rx */
96void iwl4965_rx_queue_restock(struct iwl_priv *priv);
97void iwl4965_rx_replenish(struct iwl_priv *priv);
98void iwl4965_rx_replenish_now(struct iwl_priv *priv);
99void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
100int iwl4965_rxq_stop(struct iwl_priv *priv);
101int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
102void iwl4965_rx_reply_rx(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
105 struct iwl_rx_mem_buffer *rxb);
106void iwl4965_rx_handle(struct iwl_priv *priv);
107
108/* tx */
109void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
110int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq,
112 dma_addr_t addr, u16 len, u8 reset, u8 pad);
113int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq);
115void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
116 struct ieee80211_tx_info *info);
117int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
118int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
119 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
120int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
121 struct ieee80211_sta *sta, u16 tid);
122int iwl4965_txq_check_empty(struct iwl_priv *priv,
123 int sta_id, u8 tid, int txq_id);
124void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
125 struct iwl_rx_mem_buffer *rxb);
126int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
127void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
128int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
129void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
130void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
131void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
132
133/*
134 * Acquire priv->lock before calling this function !
135 */
136void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
137/**
138 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
139 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
140 * @scd_retry: (1) Indicates queue will be used in aggregation mode
141 *
142 * NOTE: Acquire priv->lock before calling this function !
143 */
144void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
145 struct iwl_tx_queue *txq,
146 int tx_fifo_id, int scd_retry);
147
148static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
149{
150 status &= TX_STATUS_MSK;
151
152 switch (status) {
153 case TX_STATUS_SUCCESS:
154 case TX_STATUS_DIRECT_DONE:
155 return IEEE80211_TX_STAT_ACK;
156 case TX_STATUS_FAIL_DEST_PS:
157 return IEEE80211_TX_STAT_TX_FILTERED;
158 default:
159 return 0;
160 }
161}
162
163static inline bool iwl4965_is_tx_success(u32 status)
164{
165 status &= TX_STATUS_MSK;
166 return (status == TX_STATUS_SUCCESS) ||
167 (status == TX_STATUS_DIRECT_DONE);
168}
169
170u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
171
172/* rx */
173void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
174 struct iwl_rx_mem_buffer *rxb);
175bool iwl4965_good_plcp_health(struct iwl_priv *priv,
176 struct iwl_rx_packet *pkt);
177void iwl4965_rx_statistics(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb);
179void iwl4965_reply_statistics(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb);
181
182/* scan */
183int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
184
185/* station mgmt */
186int iwl4965_manage_ibss_station(struct iwl_priv *priv,
187 struct ieee80211_vif *vif, bool add);
188
189/* hcmd */
190int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
191
192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
193const char *iwl4965_get_tx_fail_reason(u32 status);
194#else
195static inline const char *
196iwl4965_get_tx_fail_reason(u32 status) { return ""; }
197#endif
198
199/* station management */
200int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
201 struct iwl_rxon_context *ctx);
202int iwl4965_add_bssid_station(struct iwl_priv *priv,
203 struct iwl_rxon_context *ctx,
204 const u8 *addr, u8 *sta_id_r);
205int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *key);
208int iwl4965_set_default_wep_key(struct iwl_priv *priv,
209 struct iwl_rxon_context *ctx,
210 struct ieee80211_key_conf *key);
211int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
212 struct iwl_rxon_context *ctx);
213int iwl4965_set_dynamic_key(struct iwl_priv *priv,
214 struct iwl_rxon_context *ctx,
215 struct ieee80211_key_conf *key, u8 sta_id);
216int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
217 struct iwl_rxon_context *ctx,
218 struct ieee80211_key_conf *key, u8 sta_id);
219void iwl4965_update_tkip_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf,
222 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
223int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
224 int sta_id, int tid);
225int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
226 int tid, u16 ssn);
227int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
228 int tid);
229void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
230 int sta_id, int cnt);
231int iwl4965_update_bcast_stations(struct iwl_priv *priv);
232
233/* rate */
234static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
235{
236 return BIT(ant_idx) << RATE_MCS_ANT_POS;
237}
238
239static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
240{
241 return le32_to_cpu(rate_n_flags) & 0xFF;
242}
243
244static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
245{
246 return cpu_to_le32(flags|(u32)rate);
247}
248
249/* eeprom */
250void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
251int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
252void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
253int iwl4965_eeprom_check_version(struct iwl_priv *priv);
254
255/* mac80211 handlers (for 4965) */
256void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
257int iwl4965_mac_start(struct ieee80211_hw *hw);
258void iwl4965_mac_stop(struct ieee80211_hw *hw);
259void iwl4965_configure_filter(struct ieee80211_hw *hw,
260 unsigned int changed_flags,
261 unsigned int *total_flags,
262 u64 multicast);
263int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
264 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
265 struct ieee80211_key_conf *key);
266void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
267 struct ieee80211_vif *vif,
268 struct ieee80211_key_conf *keyconf,
269 struct ieee80211_sta *sta,
270 u32 iv32, u16 *phase1key);
271int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
272 struct ieee80211_vif *vif,
273 enum ieee80211_ampdu_mlme_action action,
274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
275 u8 buf_size);
276int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta);
279void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
280 struct ieee80211_channel_switch *ch_switch);
281
282#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644
index 000000000000..17a1d504348e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-commands.h
@@ -0,0 +1,3405 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_legacy_commands_h__
70#define __iwl_legacy_commands_h__
71
72struct iwl_priv;
73
74/* uCode version contains 4 values: Major/Minor/API/Serial */
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
79
80
81/* Tx rates */
82#define IWL_CCK_RATES 4
83#define IWL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
85
86enum {
87 REPLY_ALIVE = 0x1,
88 REPLY_ERROR = 0x2,
89
90 /* RXON and QOS commands */
91 REPLY_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14,
95
96 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19,
99
100 /* Security */
101 REPLY_WEPKEY = 0x20,
102
103 /* RX, TX, LEDs */
104 REPLY_3945_RX = 0x1b, /* 3945 only */
105 REPLY_TX = 0x1c,
106 REPLY_RATE_SCALE = 0x47, /* 3945 only */
107 REPLY_LEDS_CMD = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
109
110 /* 802.11h related */
111 REPLY_CHANNEL_SWITCH = 0x72,
112 CHANNEL_SWITCH_NOTIFICATION = 0x73,
113 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
114 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
115
116 /* Power Management */
117 POWER_TABLE_CMD = 0x77,
118 PM_SLEEP_NOTIFICATION = 0x7A,
119 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
120
121 /* Scan commands and notifications */
122 REPLY_SCAN_CMD = 0x80,
123 REPLY_SCAN_ABORT_CMD = 0x81,
124 SCAN_START_NOTIFICATION = 0x82,
125 SCAN_RESULTS_NOTIFICATION = 0x83,
126 SCAN_COMPLETE_NOTIFICATION = 0x84,
127
128 /* IBSS/AP commands */
129 BEACON_NOTIFICATION = 0x90,
130 REPLY_TX_BEACON = 0x91,
131
132 /* Miscellaneous commands */
133 REPLY_TX_PWR_TABLE_CMD = 0x97,
134
135 /* Bluetooth device coexistence config command */
136 REPLY_BT_CONFIG = 0x9b,
137
138 /* Statistics */
139 REPLY_STATISTICS_CMD = 0x9c,
140 STATISTICS_NOTIFICATION = 0x9d,
141
142 /* RF-KILL commands and notifications */
143 CARD_STATE_NOTIFICATION = 0xa1,
144
145 /* Missed beacons notification */
146 MISSED_BEACONS_NOTIFICATION = 0xa2,
147
148 REPLY_CT_KILL_CONFIG_CMD = 0xa4,
149 SENSITIVITY_CMD = 0xa8,
150 REPLY_PHY_CALIBRATION_CMD = 0xb0,
151 REPLY_RX_PHY_CMD = 0xc0,
152 REPLY_RX_MPDU_CMD = 0xc1,
153 REPLY_RX = 0xc3,
154 REPLY_COMPRESSED_BA = 0xc5,
155
156 REPLY_MAX = 0xff
157};
158
159/******************************************************************************
160 * (0)
161 * Commonly used structures and definitions:
162 * Command header, rate_n_flags, txpower
163 *
164 *****************************************************************************/
165
166/* iwl_cmd_header flags value */
167#define IWL_CMD_FAILED_MSK 0x40
168
169#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
170#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
171#define SEQ_TO_INDEX(s) ((s) & 0xff)
172#define INDEX_TO_SEQ(i) ((i) & 0xff)
173#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
174#define SEQ_RX_FRAME cpu_to_le16(0x8000)
175
/**
 * struct iwl_cmd_header
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 */
struct iwl_cmd_header {
	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
	u8 flags;	/* 0:5 reserved, 6 abort, 7 internal */
	/*
	 * The driver sets up the sequence number to values of its choosing.
	 * uCode does not use this value, but passes it back to the driver
	 * when sending the response to each driver-originated command, so
	 * the driver can match the response to the command.  Since the values
	 * don't get used by uCode, the driver may set up an arbitrary format.
	 *
	 * There is one exception:  uCode sets bit 15 when it originates
	 * the response/notification, i.e. when the response/notification
	 * is not a direct response to a command sent by the driver.  For
	 * example, uCode issues REPLY_3945_RX when it sends a received frame
	 * to the driver; it is not a direct response to any driver command.
	 *
	 * The Linux driver uses the following format:
	 *
	 *  0:7		tfd index - position within TX queue
	 *  8:12	TX queue id
	 *  13		reserved
	 *  14		huge - driver sets this to indicate command is in the
	 *		'huge' storage at the end of the command buffers
	 *  15		unsolicited RX or uCode-originated notification
	 */
	__le16 sequence;

	/* command or response/notification data follows immediately */
	/* NOTE(review): GNU zero-length array kept (not a C99 flexible array
	 * member) -- this header is presumably embedded at the head of larger
	 * command buffers elsewhere in the driver; confirm before converting */
	u8 data[0];
} __packed;
212
213
/**
 * struct iwl3945_tx_power
 *
 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
 *
 * Each entry contains two values:
 * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
 *     linear value that multiplies the output of the digital signal processor,
 *     before being sent to the analog radio.
 * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
 *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
 *
 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
 */
struct iwl3945_tx_power {
	u8 tx_gain;	/* gain for analog radio (coarse, logarithmic) */
	u8 dsp_atten;	/* gain for DSP (fine-grained, linear) */
} __packed;
232
/**
 * struct iwl3945_power_per_rate
 *
 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
 *
 * One txpower setting (radio + DSP gain pair) for one bit rate.
 */
struct iwl3945_power_per_rate {
	u8 rate;	/* plcp (PHY rate code) */
	struct iwl3945_tx_power tpc;	/* radio/DSP gain pair for this rate */
	u8 reserved;	/* pad to 4-byte boundary */
} __packed;
243
244/**
245 * iwl4965 rate_n_flags bit fields
246 *
247 * rate_n_flags format is used in following iwl4965 commands:
248 * REPLY_RX (response only)
249 * REPLY_RX_MPDU (response only)
250 * REPLY_TX (both command and response)
251 * REPLY_TX_LINK_QUALITY_CMD
252 *
253 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
254 * 2-0: 0) 6 Mbps
255 * 1) 12 Mbps
256 * 2) 18 Mbps
257 * 3) 24 Mbps
258 * 4) 36 Mbps
259 * 5) 48 Mbps
260 * 6) 54 Mbps
261 * 7) 60 Mbps
262 *
263 * 4-3: 0) Single stream (SISO)
264 * 1) Dual stream (MIMO)
265 * 2) Triple stream (MIMO)
266 *
267 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
268 *
269 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
270 * 3-0: 0xD) 6 Mbps
271 * 0xF) 9 Mbps
272 * 0x5) 12 Mbps
273 * 0x7) 18 Mbps
274 * 0x9) 24 Mbps
275 * 0xB) 36 Mbps
276 * 0x1) 48 Mbps
277 * 0x3) 54 Mbps
278 *
279 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
280 * 6-0: 10) 1 Mbps
281 * 20) 2 Mbps
282 * 55) 5.5 Mbps
283 * 110) 11 Mbps
284 */
/* HT rate code, bits 2:0 of rate_n_flags (see format comment above) */
#define RATE_MCS_CODE_MSK 0x7
/* Spatial stream count, bits 4:3: 0 SISO, 1 dual MIMO, 2 triple MIMO */
#define RATE_MCS_SPATIAL_POS 3
#define RATE_MCS_SPATIAL_MSK 0x18
/* Bit 5: 6 Mbps HT40 duplicate data */
#define RATE_MCS_HT_DUP_POS 5
#define RATE_MCS_HT_DUP_MSK 0x20

/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
#define RATE_MCS_FLAGS_POS 8
#define RATE_MCS_HT_POS 8
#define RATE_MCS_HT_MSK 0x100

/* Bit 9: (1) CCK, (0) OFDM.  HT (bit 8) must be "0" for this bit to be valid */
#define RATE_MCS_CCK_POS 9
#define RATE_MCS_CCK_MSK 0x200

/* Bit 10: (1) Use Green Field preamble */
#define RATE_MCS_GF_POS 10
#define RATE_MCS_GF_MSK 0x400

/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
#define RATE_MCS_HT40_POS 11
#define RATE_MCS_HT40_MSK 0x800

/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
#define RATE_MCS_DUP_POS 12
#define RATE_MCS_DUP_MSK 0x1000

/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
#define RATE_MCS_SGI_POS 13
#define RATE_MCS_SGI_MSK 0x2000

/**
 * rate_n_flags Tx antenna masks
 * 4965 has 2 transmitters
 * bit14:16
 */
#define RATE_MCS_ANT_POS	14
#define RATE_MCS_ANT_A_MSK	0x04000
#define RATE_MCS_ANT_B_MSK	0x08000
#define RATE_MCS_ANT_C_MSK	0x10000
#define RATE_MCS_ANT_AB_MSK	(RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
#define RATE_MCS_ANT_ABC_MSK	(RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
#define RATE_ANT_NUM 3

/* Txpower table layout: 32 HT/OFDM entries plus one CCK entry at the end */
#define POWER_TABLE_NUM_ENTRIES			33
#define POWER_TABLE_NUM_HT_OFDM_ENTRIES		32
#define POWER_TABLE_CCK_ENTRY			32

#define IWL_PWR_NUM_HT_OFDM_ENTRIES		24
#define IWL_PWR_CCK_ENTRIES			2
335
/**
 * union iwl4965_tx_power_dual_stream
 *
 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
 * Use __le32 version (struct tx_power_dual_stream) when building command.
 *
 * Driver provides radio gain and DSP attenuation settings to device in pairs,
 * one value for each transmitter chain.  The first value is for transmitter A,
 * second for transmitter B.
 *
 * For SISO bit rates, both values in a pair should be identical.
 * For MIMO rates, one value may be different from the other,
 * in order to balance the Tx output between the two transmitters.
 *
 * See more details in doc for TXPOWER in iwl-4965-hw.h.
 */
union iwl4965_tx_power_dual_stream {
	struct {
		u8 radio_tx_gain[2];		/* analog gain, chains A/B */
		u8 dsp_predis_atten[2];		/* DSP attenuation, chains A/B */
	} s;
	u32 dw;		/* same 4 bytes viewed as one host-order word */
};
359
/**
 * struct tx_power_dual_stream
 *
 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
 *
 * Same format as union iwl4965_tx_power_dual_stream, but __le32
 * (device byte order) for placing directly into a command.
 */
struct tx_power_dual_stream {
	__le32 dw;
} __packed;
370
/**
 * struct iwl4965_tx_power_db
 *
 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
 * (one entry per rate; see POWER_TABLE_NUM_ENTRIES layout above).
 */
struct iwl4965_tx_power_db {
	struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
} __packed;
379
380/******************************************************************************
381 * (0a)
382 * Alive and Error Commands & Responses:
383 *
384 *****************************************************************************/
385
/* is_valid value in alive responses; subtype 9 marks the "initialize" alive */
#define UCODE_VALID_OK	cpu_to_le32(0x1)
#define INITIALIZE_SUBTYPE    (9)
388
389/*
390 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
391 *
392 * uCode issues this "initialize alive" notification once the initialization
393 * uCode image has completed its work, and is ready to load the runtime image.
394 * This is the *first* "alive" notification that the driver will receive after
395 * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
396 *
397 * See comments documenting "BSM" (bootstrap state machine).
398 *
399 * For 4965, this notification contains important calibration data for
400 * calculating txpower settings:
401 *
402 * 1) Power supply voltage indication. The voltage sensor outputs higher
403 * values for lower voltage, and vice verse.
404 *
405 * 2) Temperature measurement parameters, for each of two channel widths
406 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
407 * is done via one of the receiver chains, and channel width influences
408 * the results.
409 *
410 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
411 * for each of 5 frequency ranges.
412 */
/* "initialize" alive response; see the block comment above for semantics */
struct iwl_init_alive_resp {
	u8 ucode_minor;
	u8 ucode_major;
	__le16 reserved1;
	u8 sw_rev[8];
	u8 ver_type;
	u8 ver_subtype;			/* "9" for initialize alive */
	__le16 reserved2;
	__le32 log_event_table_ptr;	/* SRAM address of uCode event log */
	__le32 error_event_table_ptr;	/* SRAM address of uCode error log */
	__le32 timestamp;
	__le32 is_valid;		/* UCODE_VALID_OK when usable */

	/* calibration values from "initialize" uCode */
	__le32 voltage;		/* signed, higher value is lower voltage */
	__le32 therm_r1[2];	/* signed, 1st for normal, 2nd for HT40 */
	__le32 therm_r2[2];	/* signed */
	__le32 therm_r3[2];	/* signed */
	__le32 therm_r4[2];	/* signed */
	__le32 tx_atten[5][2];	/* signed MIMO gain comp, 5 freq groups,
				 * 2 Tx chains */
} __packed;
435
436
437/**
438 * REPLY_ALIVE = 0x1 (response only, not a command)
439 *
440 * uCode issues this "alive" notification once the runtime image is ready
441 * to receive commands from the driver. This is the *second* "alive"
442 * notification that the driver will receive after rebooting uCode;
443 * this "alive" is indicated by subtype field != 9.
444 *
445 * See comments documenting "BSM" (bootstrap state machine).
446 *
447 * This response includes two pointers to structures within the device's
448 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
449 *
450 * 1) log_event_table_ptr indicates base of the event log. This traces
451 * a 256-entry history of uCode execution within a circular buffer.
452 * Its header format is:
453 *
454 * __le32 log_size; log capacity (in number of entries)
455 * __le32 type; (1) timestamp with each entry, (0) no timestamp
456 * __le32 wraps; # times uCode has wrapped to top of circular buffer
457 * __le32 write_index; next circular buffer entry that uCode would fill
458 *
459 * The header is followed by the circular buffer of log entries. Entries
460 * with timestamps have the following format:
461 *
462 * __le32 event_id; range 0 - 1500
463 * __le32 timestamp; low 32 bits of TSF (of network, if associated)
464 * __le32 data; event_id-specific data value
465 *
466 * Entries without timestamps contain only event_id and data.
467 *
468 *
469 * 2) error_event_table_ptr indicates base of the error log. This contains
470 * information about any uCode error that occurs. For 4965, the format
471 * of the error log is:
472 *
473 * __le32 valid; (nonzero) valid, (0) log is empty
474 * __le32 error_id; type of error
475 * __le32 pc; program counter
476 * __le32 blink1; branch link
477 * __le32 blink2; branch link
478 * __le32 ilink1; interrupt link
479 * __le32 ilink2; interrupt link
480 * __le32 data1; error-specific data
481 * __le32 data2; error-specific data
482 * __le32 line; source code line of error
483 * __le32 bcon_time; beacon timer
484 * __le32 tsf_low; network timestamp function timer
485 * __le32 tsf_hi; network timestamp function timer
486 * __le32 gp1; GP1 timer register
487 * __le32 gp2; GP2 timer register
488 * __le32 gp3; GP3 timer register
489 * __le32 ucode_ver; uCode version
490 * __le32 hw_ver; HW Silicon version
491 * __le32 brd_ver; HW board version
492 * __le32 log_pc; log program counter
493 * __le32 frame_ptr; frame pointer
494 * __le32 stack_ptr; stack pointer
495 * __le32 hcmd; last host command
496 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
497 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
498 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
499 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
500 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
501 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
502 * __le32 wait_event; wait event() caller address
503 * __le32 l2p_control; L2pControlField
504 * __le32 l2p_duration; L2pDurationField
505 * __le32 l2p_mhvalid; L2pMhValidBits
506 * __le32 l2p_addr_match; L2pAddrMatchStat
507 * __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
508 * __le32 u_timestamp; indicate when the date and time of the compilation
509 * __le32 reserved;
510 *
511 * The Linux driver can print both logs to the system log when a uCode error
512 * occurs.
513 */
/* runtime "alive" response; see the block comment above for log formats */
struct iwl_alive_resp {
	u8 ucode_minor;
	u8 ucode_major;
	__le16 reserved1;
	u8 sw_rev[8];
	u8 ver_type;
	u8 ver_subtype;			/* not "9" for runtime alive */
	__le16 reserved2;
	__le32 log_event_table_ptr;	/* SRAM address for event log */
	__le32 error_event_table_ptr;	/* SRAM address for error log */
	__le32 timestamp;
	__le32 is_valid;		/* UCODE_VALID_OK when usable */
} __packed;
527
/*
 * REPLY_ERROR = 0x2 (response only, not a command)
 *
 * Sent by uCode when it rejects a host command.
 */
struct iwl_error_resp {
	__le32 error_type;
	u8 cmd_id;		/* ID of the offending host command */
	u8 reserved1;
	__le16 bad_cmd_seq_num;	/* sequence field of the offending command */
	__le32 error_info;
	__le64 timestamp;
} __packed;
539
540/******************************************************************************
541 * (1)
542 * RXON Commands & Responses:
543 *
544 *****************************************************************************/
545
546/*
547 * Rx config defines & structure
548 */
/* rx_config device types -- values for the dev_type field of the RXON cmd */
enum {
	RXON_DEV_TYPE_AP = 1,		/* access point */
	RXON_DEV_TYPE_ESS = 3,		/* infrastructure (BSS) station */
	RXON_DEV_TYPE_IBSS = 4,		/* ad-hoc network */
	RXON_DEV_TYPE_SNIFFER = 6,	/* monitor mode */
};
556
557
/* rx_chain field of the 4965 RXON command: per-chain valid/force selection
 * bitmaps plus idle/MIMO receiver counts */
#define RXON_RX_CHAIN_DRIVER_FORCE_MSK		cpu_to_le16(0x1 << 0)
#define RXON_RX_CHAIN_DRIVER_FORCE_POS		(0)
#define RXON_RX_CHAIN_VALID_MSK			cpu_to_le16(0x7 << 1)
#define RXON_RX_CHAIN_VALID_POS			(1)
#define RXON_RX_CHAIN_FORCE_SEL_MSK		cpu_to_le16(0x7 << 4)
#define RXON_RX_CHAIN_FORCE_SEL_POS		(4)
#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK	cpu_to_le16(0x7 << 7)
#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS	(7)
#define RXON_RX_CHAIN_CNT_MSK			cpu_to_le16(0x3 << 10)
#define RXON_RX_CHAIN_CNT_POS			(10)
#define RXON_RX_CHAIN_MIMO_CNT_MSK		cpu_to_le16(0x3 << 12)
#define RXON_RX_CHAIN_MIMO_CNT_POS		(12)
#define RXON_RX_CHAIN_MIMO_FORCE_MSK		cpu_to_le16(0x1 << 14)
#define RXON_RX_CHAIN_MIMO_FORCE_POS		(14)
572
/* rx_config flags -- values for the __le32 "flags" field of the RXON cmd */
/* band & modulation selection */
#define RXON_FLG_BAND_24G_MSK		cpu_to_le32(1 << 0)
#define RXON_FLG_CCK_MSK		cpu_to_le32(1 << 1)
/* auto detection enable */
#define RXON_FLG_AUTO_DETECT_MSK	cpu_to_le32(1 << 2)
/* TGg protection when tx */
#define RXON_FLG_TGG_PROTECT_MSK	cpu_to_le32(1 << 3)
/* cck short slot & preamble */
#define RXON_FLG_SHORT_SLOT_MSK		cpu_to_le32(1 << 4)
#define RXON_FLG_SHORT_PREAMBLE_MSK	cpu_to_le32(1 << 5)
/* antenna selection */
#define RXON_FLG_DIS_DIV_MSK		cpu_to_le32(1 << 7)
#define RXON_FLG_ANT_SEL_MSK		cpu_to_le32(0x0f00)
#define RXON_FLG_ANT_A_MSK		cpu_to_le32(1 << 8)
#define RXON_FLG_ANT_B_MSK		cpu_to_le32(1 << 9)
/* radar detection enable */
#define RXON_FLG_RADAR_DETECT_MSK	cpu_to_le32(1 << 12)
#define RXON_FLG_TGJ_NARROW_BAND_MSK	cpu_to_le32(1 << 13)
/* rx response to host with 8-byte TSF
* (according to ON_AIR deassertion) */
#define RXON_FLG_TSF2HOST_MSK		cpu_to_le32(1 << 15)


/* HT flags */
#define RXON_FLG_CTRL_CHANNEL_LOC_POS		(22)
#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK	cpu_to_le32(0x1 << 22)

#define RXON_FLG_HT_OPERATING_MODE_POS		(23)

#define RXON_FLG_HT_PROT_MSK			cpu_to_le32(0x1 << 23)
#define RXON_FLG_HT40_PROT_MSK			cpu_to_le32(0x2 << 23)

#define RXON_FLG_CHANNEL_MODE_POS		(25)
#define RXON_FLG_CHANNEL_MODE_MSK		cpu_to_le32(0x3 << 25)

/* channel mode -- 2-bit value placed at RXON_FLG_CHANNEL_MODE_POS */
enum {
	CHANNEL_MODE_LEGACY = 0,
	CHANNEL_MODE_PURE_40 = 1,
	CHANNEL_MODE_MIXED = 2,
	CHANNEL_MODE_RESERVED = 3,
};
#define RXON_FLG_CHANNEL_MODE_LEGACY \
	cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
#define RXON_FLG_CHANNEL_MODE_PURE_40 \
	cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
#define RXON_FLG_CHANNEL_MODE_MIXED \
	cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)

/* CTS to self (if spec allows) flag */
#define RXON_FLG_SELF_CTS_EN			cpu_to_le32(0x1<<30)

/* rx_config filter flags -- values for the "filter_flags" field */
/* accept all data frames */
#define RXON_FILTER_PROMISC_MSK		cpu_to_le32(1 << 0)
/* pass control & management to host */
#define RXON_FILTER_CTL2HOST_MSK	cpu_to_le32(1 << 1)
/* accept multi-cast */
#define RXON_FILTER_ACCEPT_GRP_MSK	cpu_to_le32(1 << 2)
/* don't decrypt uni-cast frames */
#define RXON_FILTER_DIS_DECRYPT_MSK	cpu_to_le32(1 << 3)
/* don't decrypt multi-cast frames */
#define RXON_FILTER_DIS_GRP_DECRYPT_MSK	cpu_to_le32(1 << 4)
/* STA is associated */
#define RXON_FILTER_ASSOC_MSK		cpu_to_le32(1 << 5)
/* transfer to host non bssid beacons in associated state */
#define RXON_FILTER_BCON_AWARE_MSK	cpu_to_le32(1 << 6)
641
642/**
643 * REPLY_RXON = 0x10 (command, has simple generic response)
644 *
645 * RXON tunes the radio tuner to a service channel, and sets up a number
646 * of parameters that are used primarily for Rx, but also for Tx operations.
647 *
648 * NOTE: When tuning to a new channel, driver must set the
649 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
650 * info within the device, including the station tables, tx retry
651 * rate tables, and txpower tables. Driver must build a new station
652 * table and txpower table before transmitting anything on the RXON
653 * channel.
654 *
655 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
656 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
657 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
658 */
659
/* 3945 variant of REPLY_RXON; see the command doc comment above */
struct iwl3945_rxon_cmd {
	u8 node_addr[6];	/* this device's MAC address */
	__le16 reserved1;
	u8 bssid_addr[6];	/* BSSID of network to join */
	__le16 reserved2;
	u8 wlap_bssid_addr[6];
	__le16 reserved3;
	u8 dev_type;		/* RXON_DEV_TYPE_* */
	u8 air_propagation;
	__le16 reserved4;
	u8 ofdm_basic_rates;	/* bitmap of supported OFDM rates */
	u8 cck_basic_rates;	/* bitmap of supported CCK rates */
	__le16 assoc_id;	/* association ID from the AP */
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	__le16 channel;		/* channel to tune to */
	__le16 reserved5;
} __packed;
678
/* 4965 variant of REPLY_RXON; adds rx_chain and HT basic-rate fields */
struct iwl4965_rxon_cmd {
	u8 node_addr[6];	/* this device's MAC address */
	__le16 reserved1;
	u8 bssid_addr[6];	/* BSSID of network to join */
	__le16 reserved2;
	u8 wlap_bssid_addr[6];
	__le16 reserved3;
	u8 dev_type;		/* RXON_DEV_TYPE_* */
	u8 air_propagation;
	__le16 rx_chain;	/* RXON_RX_CHAIN_* */
	u8 ofdm_basic_rates;	/* bitmap of supported OFDM rates */
	u8 cck_basic_rates;	/* bitmap of supported CCK rates */
	__le16 assoc_id;	/* association ID from the AP */
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	__le16 channel;		/* channel to tune to */
	u8 ofdm_ht_single_stream_basic_rates;
	u8 ofdm_ht_dual_stream_basic_rates;
} __packed;
698
/* Create a common rxon cmd which will be typecast into the 3945 or 4965
 * specific rxon cmd, depending on where it is called from.
 * Superset layout: same leading fields as both variants, padded at the end.
 */
struct iwl_legacy_rxon_cmd {
	u8 node_addr[6];	/* this device's MAC address */
	__le16 reserved1;
	u8 bssid_addr[6];	/* BSSID of network to join */
	__le16 reserved2;
	u8 wlap_bssid_addr[6];
	__le16 reserved3;
	u8 dev_type;		/* RXON_DEV_TYPE_* */
	u8 air_propagation;
	__le16 rx_chain;	/* 4965 only */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	__le16 assoc_id;
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	__le16 channel;
	u8 ofdm_ht_single_stream_basic_rates;	/* 4965 only */
	u8 ofdm_ht_dual_stream_basic_rates;	/* 4965 only */
	u8 reserved4;
	u8 reserved5;
} __packed;
723
724
725/*
726 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
727 */
/* 3945 variant of REPLY_RXON_ASSOC: updates a subset of RXON fields
 * without wiping device state the way a full RXON does */
struct iwl3945_rxon_assoc_cmd {
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	__le16 reserved;
} __packed;
735
/* 4965 variant of REPLY_RXON_ASSOC; adds HT rates and rx chain selection */
struct iwl4965_rxon_assoc_cmd {
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	u8 ofdm_ht_single_stream_basic_rates;
	u8 ofdm_ht_dual_stream_basic_rates;
	__le16 rx_chain_select_flags;	/* RXON_RX_CHAIN_* */
	__le16 reserved;
} __packed;
746
#define IWL_CONN_MAX_LISTEN_INTERVAL	10
/* max beacon interval the uCode supports, as a power-of-two exponent of
 * 1024 TU: 4 -> 4096 on 4965, 1 -> 1024 on 3945 */
#define IWL_MAX_UCODE_BEACON_INTERVAL	4 /* 4096 */
#define IWL39_MAX_UCODE_BEACON_INTERVAL	1 /* 1024 */
750
751/*
752 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
753 */
/* REPLY_RXON_TIMING payload: beacon/TBTT timing for the tuned network */
struct iwl_rxon_time_cmd {
	__le64 timestamp;		/* network TSF */
	__le16 beacon_interval;		/* in TU */
	__le16 atim_window;		/* IBSS ATIM window, in TU */
	__le32 beacon_init_val;
	__le16 listen_interval;		/* in beacon intervals */
	u8 dtim_period;
	u8 delta_cp_bss_tbtts;
} __packed;
763
764/*
765 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
766 */
/* 3945 variant of REPLY_CHANNEL_SWITCH; carries per-rate txpower for the
 * new channel, since channel switch wipes the txpower table */
struct iwl3945_channel_switch_cmd {
	u8 band;
	u8 expect_beacon;
	__le16 channel;			/* channel to switch to */
	__le32 rxon_flags;		/* RXON_FLG_* for new channel */
	__le32 rxon_filter_flags;	/* RXON_FILTER_* for new channel */
	__le32 switch_time;
	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
} __packed;
776
/* 4965 variant of REPLY_CHANNEL_SWITCH; carries the full dual-stream
 * txpower table for the new channel */
struct iwl4965_channel_switch_cmd {
	u8 band;
	u8 expect_beacon;
	__le16 channel;			/* channel to switch to */
	__le32 rxon_flags;		/* RXON_FLG_* for new channel */
	__le32 rxon_filter_flags;	/* RXON_FILTER_* for new channel */
	__le32 switch_time;
	struct iwl4965_tx_power_db tx_power;
} __packed;
786
787/*
788 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
789 */
/* CHANNEL_SWITCH_NOTIFICATION payload: result of a channel switch */
struct iwl_csa_notification {
	__le16 band;
	__le16 channel;		/* channel actually switched to */
	__le32 status;		/* 0 - OK, 1 - fail */
} __packed;
795
796/******************************************************************************
797 * (2)
798 * Quality-of-Service (QOS) Commands & Responses:
799 *
800 *****************************************************************************/
801
802/**
803 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
804 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
805 *
806 * @cw_min: Contention window, start value in numbers of slots.
807 * Should be a power-of-2, minus 1. Device's default is 0x0f.
808 * @cw_max: Contention window, max value in numbers of slots.
809 * Should be a power-of-2, minus 1. Device's default is 0x3f.
810 * @aifsn: Number of slots in Arbitration Interframe Space (before
811 * performing random backoff timing prior to Tx). Device default 1.
812 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
813 *
814 * Device will automatically increase contention window by (2*CW) + 1 for each
815 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
816 * value, to cap the CW value.
817 */
/* EDCA parameters for one access category; see doc comment above */
struct iwl_ac_qos {
	__le16 cw_min;		/* contention window start, 2^n - 1 */
	__le16 cw_max;		/* contention window cap, 2^n - 1 */
	u8 aifsn;		/* arbitration interframe space, in slots */
	u8 reserved1;
	__le16 edca_txop;	/* TX opportunity length, in uSec */
} __packed;
825
/* QoS flags defines -- values for qos_flags in struct iwl_qosparam_cmd */
#define QOS_PARAM_FLG_UPDATE_EDCA_MSK	cpu_to_le32(0x01)
#define QOS_PARAM_FLG_TGN_MSK		cpu_to_le32(0x02)
#define QOS_PARAM_FLG_TXOP_TYPE_MSK	cpu_to_le32(0x10)

/* Number of Access Categories (AC) (EDCA), queues 0..3 */
#define AC_NUM                4
833
834/*
835 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
836 *
837 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
838 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
839 */
/* REPLY_QOS_PARAM payload: EDCA timings for all four AC FIFOs */
struct iwl_qosparam_cmd {
	__le32 qos_flags;		/* QOS_PARAM_FLG_* */
	struct iwl_ac_qos ac[AC_NUM];	/* 0: BK, 1: BE, 2: VI, 3: VO */
} __packed;
844
845/******************************************************************************
846 * (3)
847 * Add/Modify Stations Commands & Responses:
848 *
849 *****************************************************************************/
850/*
851 * Multi station support
852 */
853
/* Special, dedicated locations within device's station table */
#define	IWL_AP_ID		0
#define	IWL_STA_ID		2
#define	IWL3945_BROADCAST_ID	24
#define IWL3945_STATION_COUNT	25
#define IWL4965_BROADCAST_ID	31
#define	IWL4965_STATION_COUNT	32

#define	IWL_STATION_COUNT	32 	/* MAX(3945,4965)*/
#define	IWL_INVALID_STATION 	255

/* station_flags / station_flags_msk bits in REPLY_ADD_STA */
#define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)
#define STA_FLG_RTS_MIMO_PROT_MSK	cpu_to_le32(1 << 17)
#define STA_FLG_AGG_MPDU_8US_MSK	cpu_to_le32(1 << 18)
#define STA_FLG_MAX_AGG_SIZE_POS	(19)
#define STA_FLG_MAX_AGG_SIZE_MSK	cpu_to_le32(3 << 19)
#define STA_FLG_HT40_EN_MSK		cpu_to_le32(1 << 21)
#define STA_FLG_MIMO_DIS_MSK		cpu_to_le32(1 << 22)
#define STA_FLG_AGG_MPDU_DENSITY_POS	(23)
#define STA_FLG_AGG_MPDU_DENSITY_MSK	cpu_to_le32(7 << 23)

/* Use in mode field.  1: modify existing entry, 0: add new station entry */
#define STA_CONTROL_MODIFY_MSK		0x01

/* key flags __le16*/
#define STA_KEY_FLG_ENCRYPT_MSK	cpu_to_le16(0x0007)
#define STA_KEY_FLG_NO_ENC	cpu_to_le16(0x0000)
#define STA_KEY_FLG_WEP		cpu_to_le16(0x0001)
#define STA_KEY_FLG_CCMP	cpu_to_le16(0x0002)
#define STA_KEY_FLG_TKIP	cpu_to_le16(0x0003)

#define STA_KEY_FLG_KEYID_POS	8
#define STA_KEY_FLG_INVALID 	cpu_to_le16(0x0800)
/* wep key is either from global key (0) or from station info array (1) */
#define STA_KEY_FLG_MAP_KEY_MSK	cpu_to_le16(0x0008)

/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
#define STA_KEY_FLG_KEY_SIZE_MSK	cpu_to_le16(0x1000)
#define STA_KEY_MULTICAST_MSK		cpu_to_le16(0x4000)
#define STA_KEY_MAX_NUM	8

/* Flags indicate whether to modify vs. don't change various station params */
#define	STA_MODIFY_KEY_MASK		0x01
#define	STA_MODIFY_TID_DISABLE_TX	0x02
#define	STA_MODIFY_TX_RATE_MSK		0x04
#define STA_MODIFY_ADDBA_TID_MSK	0x08
#define STA_MODIFY_DELBA_TID_MSK	0x10
#define STA_MODIFY_SLEEP_TX_COUNT_MSK	0x20

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
907
/* Security key material for one station entry in REPLY_ADD_STA */
struct iwl4965_keyinfo {
	__le16 key_flags;	/* STA_KEY_FLG_* */
	u8 tkip_rx_tsc_byte2;	/* TSC[2] for key mix ph1 detection */
	u8 reserved1;
	__le16 tkip_rx_ttak[5];	/* 10-byte unicast TKIP TTAK */
	u8 key_offset;		/* offset into uCode key table */
	u8 reserved2;
	u8 key[16];		/* 16-byte unicast decryption key */
} __packed;
917
918/**
919 * struct sta_id_modify
920 * @addr[ETH_ALEN]: station's MAC address
921 * @sta_id: index of station in uCode's station table
922 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
923 *
924 * Driver selects unused table index when adding new station,
925 * or the index to a pre-existing station entry when modifying that station.
926 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
927 *
928 * modify_mask flags select which parameters to modify vs. leave alone.
929 */
/* Station identity sub-struct of REPLY_ADD_STA; see doc comment above */
struct sta_id_modify {
	u8 addr[ETH_ALEN];	/* station's MAC address */
	__le16 reserved1;
	u8 sta_id;		/* index of station in uCode's station table */
	u8 modify_mask;		/* STA_MODIFY_*, 1: modify, 0: don't change */
	__le16 reserved2;
} __packed;
937
938/*
939 * REPLY_ADD_STA = 0x18 (command)
940 *
941 * The device contains an internal table of per-station information,
942 * with info on security keys, aggregation parameters, and Tx rates for
943 * initial Tx attempt and any retries (4965 devices uses
944 * REPLY_TX_LINK_QUALITY_CMD,
945 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
946 *
947 * REPLY_ADD_STA sets up the table entry for one station, either creating
948 * a new entry, or modifying a pre-existing one.
949 *
950 * NOTE: RXON command (without "associated" bit set) wipes the station table
951 * clean. Moving into RF_KILL state does this also. Driver must set up
952 * new station table before transmitting anything on the RXON channel
953 * (except active scans or active measurements; those commands carry
954 * their own txpower/rate setup data).
955 *
956 * When getting started on a new channel, driver must set up the
957 * IWL_BROADCAST_ID entry (last entry in the table). For a client
958 * station in a BSS, once an AP is selected, driver sets up the AP STA
959 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
960 * are all that are needed for a BSS client station. If the device is
961 * used as AP, or in an IBSS network, driver must set up station table
962 * entries for all STAs in network, starting with index IWL_STA_ID.
963 */
964
/* 3945 variant of REPLY_ADD_STA; see the command doc comment above */
struct iwl3945_addsta_cmd {
	u8 mode;		/* 1: modify existing, 0: add new station */
	u8 reserved[3];
	struct sta_id_modify sta;
	struct iwl4965_keyinfo key;
	__le32 station_flags;		/* STA_FLG_* */
	__le32 station_flags_msk;	/* STA_FLG_* */

	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
	 * corresponding to bit (e.g. bit 5 controls TID 5).
	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
	__le16 tid_disable_tx;

	__le16 rate_n_flags;	/* initial Tx rate for this station (3945) */

	/* TID for which to add block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	u8 add_immediate_ba_tid;

	/* TID for which to remove block-ack support.
	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
	u8 remove_immediate_ba_tid;

	/* Starting Sequence Number for added block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	__le16 add_immediate_ba_ssn;
} __packed;
992
/* 4965 variant of REPLY_ADD_STA; replaces 3945's rate_n_flags with a
 * reserved word and adds sleep_tx_count for PS-poll/u-APSD handling */
struct iwl4965_addsta_cmd {
	u8 mode;		/* 1: modify existing, 0: add new station */
	u8 reserved[3];
	struct sta_id_modify sta;
	struct iwl4965_keyinfo key;
	__le32 station_flags;		/* STA_FLG_* */
	__le32 station_flags_msk;	/* STA_FLG_* */

	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
	 * corresponding to bit (e.g. bit 5 controls TID 5).
	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
	__le16 tid_disable_tx;

	__le16 reserved1;

	/* TID for which to add block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	u8 add_immediate_ba_tid;

	/* TID for which to remove block-ack support.
	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
	u8 remove_immediate_ba_tid;

	/* Starting Sequence Number for added block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	__le16 add_immediate_ba_ssn;

	/*
	 * Number of packets OK to transmit to station even though
	 * it is asleep -- used to synchronise PS-poll and u-APSD
	 * responses while ucode keeps track of STA sleep state.
	 */
	__le16 sleep_tx_count;

	__le16 reserved2;
} __packed;
1029
/* Wrapper struct for 3945 and 4965 addsta_cmd structures: superset layout
 * that is typecast to the device-specific variant before sending */
struct iwl_legacy_addsta_cmd {
	u8 mode;		/* 1: modify existing, 0: add new station */
	u8 reserved[3];
	struct sta_id_modify sta;
	struct iwl4965_keyinfo key;
	__le32 station_flags;		/* STA_FLG_* */
	__le32 station_flags_msk;	/* STA_FLG_* */

	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
	 * corresponding to bit (e.g. bit 5 controls TID 5).
	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
	__le16 tid_disable_tx;

	__le16 rate_n_flags;	/* 3945 only */

	/* TID for which to add block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	u8 add_immediate_ba_tid;

	/* TID for which to remove block-ack support.
	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
	u8 remove_immediate_ba_tid;

	/* Starting Sequence Number for added block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	__le16 add_immediate_ba_ssn;

	/*
	 * Number of packets OK to transmit to station even though
	 * it is asleep -- used to synchronise PS-poll and u-APSD
	 * responses while ucode keeps track of STA sleep state.
	 */
	__le16 sleep_tx_count;

	__le16 reserved2;
} __packed;
1067
1068
/* status values returned in struct iwl_add_sta_resp */
#define ADD_STA_SUCCESS_MSK		0x1
#define ADD_STA_NO_ROOM_IN_TABLE	0x2
#define ADD_STA_NO_BLOCK_ACK_RESOURCE	0x4
#define ADD_STA_MODIFY_NON_EXIST_STA	0x8
1073/*
1074 * REPLY_ADD_STA = 0x18 (response)
1075 */
/* REPLY_ADD_STA response payload */
struct iwl_add_sta_resp {
	u8 status;	/* ADD_STA_* */
} __packed;
1079
#define REM_STA_SUCCESS_MSK		0x1
/*
 * REPLY_REM_STA = 0x19 (response)
 */
struct iwl_rem_sta_resp {
	u8 status;	/* REM_STA_SUCCESS_MSK on success */
} __packed;
1087
1088/*
1089 * REPLY_REM_STA = 0x19 (command)
1090 */
/* REPLY_REM_STA command payload: remove station(s) from the device table */
struct iwl_rem_sta_cmd {
	u8 num_sta;		/* number of removed stations */
	u8 reserved[3];
	u8 addr[ETH_ALEN];	/* MAC addr of the first station */
	u8 reserved2[2];
} __packed;
1097
/* per-AC Tx FIFO bits plus the aggregation-queue range */
#define IWL_TX_FIFO_BK_MSK		cpu_to_le32(BIT(0))
#define IWL_TX_FIFO_BE_MSK		cpu_to_le32(BIT(1))
#define IWL_TX_FIFO_VI_MSK		cpu_to_le32(BIT(2))
#define IWL_TX_FIFO_VO_MSK		cpu_to_le32(BIT(3))
#define IWL_AGG_TX_QUEUE_MSK		cpu_to_le32(0xffc00)

/* frame-drop scopes */
#define IWL_DROP_SINGLE		0
#define IWL_DROP_SELECTED	1
#define IWL_DROP_ALL		2
1107
1108/*
1109 * REPLY_WEP_KEY = 0x20
1110 */
/* One global WEP key entry carried inside REPLY_WEP_KEY */
struct iwl_wep_key {
	u8 key_index;		/* WEP key index, 0..WEP_KEYS_MAX-1 */
	u8 key_offset;		/* offset into uCode key table */
	u8 reserved1[2];
	u8 key_size;		/* WEP_KEY_LEN_64 or WEP_KEY_LEN_128 */
	u8 reserved2[3];
	u8 key[16];		/* key material */
} __packed;
1119
/* REPLY_WEP_KEY command body, followed by num_keys iwl_wep_key entries */
struct iwl_wep_cmd {
	u8 num_keys;		/* # of iwl_wep_key entries that follow */
	u8 global_key_type;	/* e.g. WEP_KEY_WEP_TYPE */
	u8 flags;
	u8 reserved;
	struct iwl_wep_key key[0];	/* variable-length key array */
} __packed;
1127
/* WEP key table constants */
#define WEP_KEY_WEP_TYPE 1
#define WEP_KEYS_MAX 4
#define WEP_INVALID_OFFSET 0xff
#define WEP_KEY_LEN_64 5	/* 5 bytes = 40-bit key */
#define WEP_KEY_LEN_128 13	/* 13 bytes = 104-bit key */
1133
1134/******************************************************************************
1135 * (4)
1136 * Rx Responses:
1137 *
1138 *****************************************************************************/
1139
/* rx status word: CRC / RXE overflow indications */
#define RX_RES_STATUS_NO_CRC32_ERROR	cpu_to_le32(1 << 0)
#define RX_RES_STATUS_NO_RXE_OVERFLOW	cpu_to_le32(1 << 1)

/* phy_flags bits (see iwl3945_rx_frame_hdr.phy_flags, iwl_rx_phy_res.phy_flags) */
#define RX_RES_PHY_FLAGS_BAND_24_MSK	cpu_to_le16(1 << 0)	/* 2.4 GHz band */
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK		cpu_to_le16(1 << 1)	/* CCK modulation */
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK	cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK	cpu_to_le16(1 << 3)
#define RX_RES_PHY_FLAGS_ANTENNA_MSK		0xf0
#define RX_RES_PHY_FLAGS_ANTENNA_POS		4

/* rx status bits 8:10 — security type of the received frame */
#define RX_RES_STATUS_SEC_TYPE_MSK	(0x7 << 8)
#define RX_RES_STATUS_SEC_TYPE_NONE	(0x0 << 8)
#define RX_RES_STATUS_SEC_TYPE_WEP	(0x1 << 8)
#define RX_RES_STATUS_SEC_TYPE_CCMP	(0x2 << 8)
#define RX_RES_STATUS_SEC_TYPE_TKIP	(0x3 << 8)
#define	RX_RES_STATUS_SEC_TYPE_ERR	(0x7 << 8)

#define RX_RES_STATUS_STATION_FOUND	(1<<6)
#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH	(1<<7)

/* rx status bits 11:12 — decryption result */
#define RX_RES_STATUS_DECRYPT_TYPE_MSK	(0x3 << 11)
#define RX_RES_STATUS_NOT_DECRYPT	(0x0 << 11)
#define RX_RES_STATUS_DECRYPT_OK	(0x3 << 11)
#define RX_RES_STATUS_BAD_ICV_MIC	(0x1 << 11)
#define RX_RES_STATUS_BAD_KEY_TTAK	(0x2 << 11)

/* Per-MPDU status bits */
#define RX_MPDU_RES_STATUS_ICV_OK	(0x20)
#define RX_MPDU_RES_STATUS_MIC_OK	(0x40)
#define RX_MPDU_RES_STATUS_TTAK_OK	(1 << 7)
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK	(0x800)
1170
1171
/* 3945 only: per-frame phy/statistics preamble of REPLY_3945_RX.
 * phy_count bytes of phy data follow in payload[]; the hdr/end parts of
 * the response come after that (see struct iwl3945_rx_frame). */
struct iwl3945_rx_frame_stats {
	u8 phy_count;		/* # bytes of phy data in payload[] */
	u8 id;
	u8 rssi;		/* received signal strength indicator */
	u8 agc;			/* automatic gain control value */
	__le16 sig_avg;
	__le16 noise_diff;
	u8 payload[0];		/* phy_count bytes of phy data */
} __packed;
1181
/* 3945 only: rx frame header; len bytes of frame data follow in payload[] */
struct iwl3945_rx_frame_hdr {
	__le16 channel;		/* channel the frame was received on */
	__le16 phy_flags;	/* RX_RES_PHY_FLAGS_* */
	u8 reserved1;
	u8 rate;
	__le16 len;		/* byte count of frame data in payload[] */
	u8 payload[0];		/* the received frame itself */
} __packed;
1190
/* 3945 only: trailer of REPLY_3945_RX */
struct iwl3945_rx_frame_end {
	__le32 status;		/* RX_RES_STATUS_* */
	__le64 timestamp;
	__le32 beacon_timestamp;
} __packed;
1196
1197/*
1198 * REPLY_3945_RX = 0x1b (response only, not a command)
1199 *
1200 * NOTE: DO NOT dereference from casts to this structure
1201 * It is provided only for calculating minimum data set size.
1202 * The actual offsets of the hdr and end are dynamic based on
1203 * stats.phy_count
1204 */
1205struct iwl3945_rx_frame {
1206 struct iwl3945_rx_frame_stats stats;
1207 struct iwl3945_rx_frame_hdr hdr;
1208 struct iwl3945_rx_frame_end end;
1209} __packed;
1210
1211#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
1212
/* Fixed (non-configurable) rx data from phy */

#define IWL49_RX_RES_PHY_CNT 14
#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET	(4)
#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK	(0x70)
#define IWL49_AGC_DB_MASK			(0x3f80)	/* MASK(7,13) */
#define IWL49_AGC_DB_POS			(7)
/* 4965 layout of iwl_rx_phy_res.non_cfg_phy_buf[] */
struct iwl4965_rx_non_cfg_phy {
	__le16 ant_selection;	/* ant A bit 4, ant B bit 5, ant C bit 6 */
	__le16 agc_info;	/* agc code 0:6, agc dB 7:13, reserved 14:15 */
	u8 rssi_info[6];	/* we use even entries, 0/2/4 for A/B/C rssi */
	u8 pad[0];
} __packed;
1226
1227
1228/*
1229 * REPLY_RX = 0xc3 (response only, not a command)
1230 * Used only for legacy (non 11n) frames.
1231 */
1232struct iwl_rx_phy_res {
1233 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1234 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1235 u8 stat_id; /* configurable DSP phy data set ID */
1236 u8 reserved1;
1237 __le64 timestamp; /* TSF at on air rise */
1238 __le32 beacon_time_stamp; /* beacon at on-air rise */
1239 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1240 __le16 channel; /* channel number */
1241 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1242 __le32 rate_n_flags; /* RATE_MCS_* */
1243 __le16 byte_count; /* frame's byte-count */
1244 __le16 frame_time; /* frame's time on the air */
1245} __packed;
1246
/* Length header preceding MPDU data in rx responses.  NOTE(review):
 * presumably used for the non-legacy (11n) rx path, since REPLY_RX above is
 * documented as legacy-only — confirm against the rx handling code. */
struct iwl_rx_mpdu_res_start {
	__le16 byte_count;	/* byte length of the MPDU that follows */
	__le16 reserved;
} __packed;
1251
1252
1253/******************************************************************************
1254 * (5)
1255 * Tx Commands & Responses:
1256 *
1257 * Driver must place each REPLY_TX command into one of the prioritized Tx
1258 * queues in host DRAM, shared between driver and device (see comments for
1259 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1260 * are preparing to transmit, the device pulls the Tx command over the PCI
1261 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
1262 * from which data will be transmitted.
1263 *
1264 * uCode handles all timing and protocol related to control frames
1265 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1266 * handle reception of block-acks; uCode updates the host driver via
1267 * REPLY_COMPRESSED_BA.
1268 *
1269 * uCode handles retrying Tx when an ACK is expected but not received.
1270 * This includes trying lower data rates than the one requested in the Tx
1271 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
1272 * REPLY_TX_LINK_QUALITY_CMD (4965).
1273 *
1274 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1275 * This command must be executed after every RXON command, before Tx can occur.
1276 *****************************************************************************/
1277
/*
 * REPLY_TX Tx flags field
 *
 * These values are OR'd into the tx_flags member of
 * iwl3945_tx_cmd / iwl_tx_cmd below.
 */

/*
 * 1: Use Request-To-Send protocol before this frame.
 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
 */
#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)

/*
 * 1: Transmit Clear-To-Send to self before this frame.
 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
 */
#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)

/* 1: Expect ACK from receiving station
 * 0: Don't expect ACK (MAC header's duration field s/b 0)
 * Set this for unicast frames, but not broadcast/multicast. */
#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)

/* For 4965 devices:
 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
 *    Tx command's initial_rate_index indicates first rate to try;
 *    uCode walks through table for additional Tx attempts.
 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
 *    This rate will be used for all Tx attempts; it will not be scaled. */
#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)

/* 1: Expect immediate block-ack.
 * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)

/*
 * 1: Frame requires full Tx-Op protection.
 * Set this if either RTS or CTS Tx Flag gets set.
 */
#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)

/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)	/* force antenna A */
#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)	/* force antenna B */

/* 1: uCode overrides sequence control field in MAC header.
 * 0: Driver provides sequence control field in MAC header.
 * Set this for management frames, non-QOS data frames, non-unicast frames,
 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)

/* 1: This frame is non-last MPDU; more fragments are coming.
 * 0: Last fragment, or not using fragmentation. */
#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)

/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
 * 0: No TSF required in outgoing frame.
 * Set this for transmitting beacons and probe responses. */
#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)

/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
 *    alignment of frame's payload data field.
 * 0: No pad
 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
 * field (but not both).  Driver must align frame data (i.e. data following
 * MAC header) to DWORD boundary. */
#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)

/* accelerate aggregation support
 * 0 - no CCMP encryption; 1 - CCMP encryption */
#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)

/* HCCA-AP - disable duration overwriting. */
#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1351
1352
1353/*
1354 * TX command security control
1355 */
1356#define TX_CMD_SEC_WEP 0x01
1357#define TX_CMD_SEC_CCM 0x02
1358#define TX_CMD_SEC_TKIP 0x03
1359#define TX_CMD_SEC_MSK 0x03
1360#define TX_CMD_SEC_SHIFT 6
1361#define TX_CMD_SEC_KEY128 0x08
1362
1363/*
1364 * security overhead sizes
1365 */
1366#define WEP_IV_LEN 4
1367#define WEP_ICV_LEN 4
1368#define CCMP_MIC_LEN 8
1369#define TKIP_ICV_LEN 4
1370
1371/*
1372 * REPLY_TX = 0x1c (command)
1373 */
1374
1375struct iwl3945_tx_cmd {
1376 /*
1377 * MPDU byte count:
1378 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1379 * + 8 byte IV for CCM or TKIP (not used for WEP)
1380 * + Data payload
1381 * + 8-byte MIC (not used for CCM/WEP)
1382 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1383 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
1384 * Range: 14-2342 bytes.
1385 */
1386 __le16 len;
1387
1388 /*
1389 * MPDU or MSDU byte count for next frame.
1390 * Used for fragmentation and bursting, but not 11n aggregation.
1391 * Same as "len", but for next frame. Set to 0 if not applicable.
1392 */
1393 __le16 next_frame_len;
1394
1395 __le32 tx_flags; /* TX_CMD_FLG_* */
1396
1397 u8 rate;
1398
1399 /* Index of recipient station in uCode's station table */
1400 u8 sta_id;
1401 u8 tid_tspec;
1402 u8 sec_ctl;
1403 u8 key[16];
1404 union {
1405 u8 byte[8];
1406 __le16 word[4];
1407 __le32 dw[2];
1408 } tkip_mic;
1409 __le32 next_frame_info;
1410 union {
1411 __le32 life_time;
1412 __le32 attempt;
1413 } stop_time;
1414 u8 supp_rates[2];
1415 u8 rts_retry_limit; /*byte 50 */
1416 u8 data_retry_limit; /*byte 51 */
1417 union {
1418 __le16 pm_frame_timeout;
1419 __le16 attempt_duration;
1420 } timeout;
1421
1422 /*
1423 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1424 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1425 */
1426 __le16 driver_txop;
1427
1428 /*
1429 * MAC header goes here, followed by 2 bytes padding if MAC header
1430 * length is 26 or 30 bytes, followed by payload data
1431 */
1432 u8 payload[0];
1433 struct ieee80211_hdr hdr[0];
1434} __packed;
1435
1436/*
1437 * REPLY_TX = 0x1c (response)
1438 */
1439struct iwl3945_tx_resp {
1440 u8 failure_rts;
1441 u8 failure_frame;
1442 u8 bt_kill_count;
1443 u8 rate;
1444 __le32 wireless_media_time;
1445 __le32 status; /* TX status */
1446} __packed;
1447
1448
1449/*
1450 * 4965 uCode updates these Tx attempt count values in host DRAM.
1451 * Used for managing Tx retries when expecting block-acks.
1452 * Driver should set these fields to 0.
1453 */
1454struct iwl_dram_scratch {
1455 u8 try_cnt; /* Tx attempts */
1456 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1457 __le16 reserved;
1458} __packed;
1459
/*
 * REPLY_TX = 0x1c (command), 4965-format (cf. iwl3945_tx_cmd above).
 * The MAC header and frame data directly follow the fixed fields;
 * payload[] and hdr[] are two views of those same trailing bytes.
 */
struct iwl_tx_cmd {
	/*
	 * MPDU byte count:
	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
	 * + 8 byte IV for CCM or TKIP (not used for WEP)
	 * + Data payload
	 * + 8-byte MIC (not used for CCM/WEP)
	 * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
	 *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
	 * Range: 14-2342 bytes.
	 */
	__le16 len;

	/*
	 * MPDU or MSDU byte count for next frame.
	 * Used for fragmentation and bursting, but not 11n aggregation.
	 * Same as "len", but for next frame.  Set to 0 if not applicable.
	 */
	__le16 next_frame_len;

	__le32 tx_flags;	/* TX_CMD_FLG_* */

	/* uCode may modify this field of the Tx command (in host DRAM!).
	 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
	struct iwl_dram_scratch scratch;

	/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
	__le32 rate_n_flags;	/* RATE_MCS_* */

	/* Index of destination station in uCode's station table */
	u8 sta_id;

	/* Type of security encryption: CCM or TKIP */
	u8 sec_ctl;		/* TX_CMD_SEC_* */

	/*
	 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
	 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
	 * data frames, this field may be used to selectively reduce initial
	 * rate (via non-0 value) for special frames (e.g. management), while
	 * still supporting rate scaling for all frames.
	 */
	u8 initial_rate_index;
	u8 reserved;
	u8 key[16];		/* crypto key material */
	__le16 next_frame_flags;
	__le16 reserved2;
	union {
		__le32 life_time;
		__le32 attempt;
	} stop_time;

	/* Host DRAM physical address pointer to "scratch" in this command.
	 * Must be dword aligned.  "0" in dram_lsb_ptr disables usage. */
	__le32 dram_lsb_ptr;
	u8 dram_msb_ptr;

	u8 rts_retry_limit;	/*byte 50 */
	u8 data_retry_limit;	/*byte 51 */
	u8 tid_tspec;		/* TID (traffic identifier) */
	union {
		__le16 pm_frame_timeout;
		__le16 attempt_duration;
	} timeout;

	/*
	 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
	 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
	 */
	__le16 driver_txop;

	/*
	 * MAC header goes here, followed by 2 bytes padding if MAC header
	 * length is 26 or 30 bytes, followed by payload data
	 */
	u8 payload[0];
	struct ieee80211_hdr hdr[0];
} __packed;
1538
1539/* TX command response is sent after *3945* transmission attempts.
1540 *
1541 * NOTES:
1542 *
1543 * TX_STATUS_FAIL_NEXT_FRAG
1544 *
1545 * If the fragment flag in the MAC header for the frame being transmitted
1546 * is set and there is insufficient time to transmit the next frame, the
1547 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
1548 *
1549 * TX_STATUS_FIFO_UNDERRUN
1550 *
1551 * Indicates the host did not provide bytes to the FIFO fast enough while
1552 * a TX was in progress.
1553 *
1554 * TX_STATUS_FAIL_MGMNT_ABORT
1555 *
1556 * This status is only possible if the ABORT ON MGMT RX parameter was
1557 * set to true with the TX command.
1558 *
 * If the MSB of the status parameter is set then an abort sequence is
 * required.  This sequence consists of the host activating the TX Abort
 * control line, and then waiting for the TX Abort command response. This
 * indicates that the device is no longer in a transmit state, and that the
 * command FIFO has been cleared.  The host must then deactivate the TX Abort
 * control line.  Receiving is still allowed in this case.
1565 */
/* 3945 Tx completion status codes (iwl3945_tx_resp.status); see notes above */
enum {
	TX_3945_STATUS_SUCCESS = 0x01,
	TX_3945_STATUS_DIRECT_DONE = 0x02,
	TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
	TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
	TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
	TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
	TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
	TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
	TX_3945_STATUS_FAIL_DEST_PS = 0x88,
	TX_3945_STATUS_FAIL_ABORTED = 0x89,
	TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
	TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
	TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
	TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
	TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
	TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
	TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
	TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
};
1586
1587/*
1588 * TX command response is sent after *4965* transmission attempts.
1589 *
1590 * both postpone and abort status are expected behavior from uCode. there is
1591 * no special operation required from driver; except for RFKILL_FLUSH,
1592 * which required tx flush host command to flush all the tx frames in queues
1593 */
1594enum {
1595 TX_STATUS_SUCCESS = 0x01,
1596 TX_STATUS_DIRECT_DONE = 0x02,
1597 /* postpone TX */
1598 TX_STATUS_POSTPONE_DELAY = 0x40,
1599 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
1600 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
1601 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
1602 /* abort TX */
1603 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
1604 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
1605 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
1606 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1607 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
1608 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
1609 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1610 TX_STATUS_FAIL_DEST_PS = 0x88,
1611 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
1612 TX_STATUS_FAIL_BT_RETRY = 0x8a,
1613 TX_STATUS_FAIL_STA_INVALID = 0x8b,
1614 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1615 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1616 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1617 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1618 TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
1619 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1620};
1621
/* Values for the TX_PACKET_MODE_MSK field (bits 8:15) of the tx status word */
#define TX_PACKET_MODE_REGULAR		0x0000
#define TX_PACKET_MODE_BURST_SEQ	0x0100
#define TX_PACKET_MODE_BURST_FIRST	0x0200

/* Value for the TX_POWER_PA_DETECT_MSK field of the tx status word */
enum {
	TX_POWER_PA_NOT_ACTIVE = 0x0,
};

/* Bit fields of the __le32 tx status word (see iwl4965_tx_resp.u.status) */
enum {
	TX_STATUS_MSK = 0x000000ff,		/* bits 0:7 */
	TX_STATUS_DELAY_MSK = 0x00000040,
	TX_STATUS_ABORT_MSK = 0x00000080,
	TX_PACKET_MODE_MSK = 0x0000ff00,	/* bits 8:15 */
	TX_FIFO_NUMBER_MSK = 0x00070000,	/* bits 16:18 */
	TX_RESERVED = 0x00780000,		/* bits 19:22 */
	TX_POWER_PA_DETECT_MSK = 0x7f800000,	/* bits 23:30 */
	TX_ABORT_REQUIRED_MSK = 0x80000000,	/* bits 31:31 */
};
1640
1641/* *******************************
1642 * TX aggregation status
1643 ******************************* */
1644
/* Per-frame aggregation status codes, carried in the low 12 bits of
 * agg_tx_status.status (see AGG_TX_STATUS_MSK and iwl4965_tx_resp below). */
enum {
	AGG_TX_STATE_TRANSMITTED = 0x00,
	AGG_TX_STATE_UNDERRUN_MSK = 0x01,
	AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
	AGG_TX_STATE_ABORT_MSK = 0x08,
	AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
	AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
	AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
	AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
	AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
	AGG_TX_STATE_DUMP_TX_MSK = 0x200,
	AGG_TX_STATE_DELAY_TX_MSK = 0x400
};

#define AGG_TX_STATUS_MSK	0x00000fff	/* bits 0:11 */
#define AGG_TX_TRY_MSK		0x0000f000	/* bits 12:15 */

#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
				     AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)

/* # tx attempts for first frame in aggregation */
#define AGG_TX_STATE_TRY_CNT_POS 12
#define AGG_TX_STATE_TRY_CNT_MSK 0xf000

/* Command ID and sequence number of Tx command for this frame */
#define AGG_TX_STATE_SEQ_NUM_POS 16
#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1672
1673/*
1674 * REPLY_TX = 0x1c (response)
1675 *
1676 * This response may be in one of two slightly different formats, indicated
1677 * by the frame_count field:
1678 *
1679 * 1) No aggregation (frame_count == 1). This reports Tx results for
1680 * a single frame. Multiple attempts, at various bit rates, may have
1681 * been made for this frame.
1682 *
1683 * 2) Aggregation (frame_count > 1). This reports Tx results for
1684 * 2 or more frames that used block-acknowledge. All frames were
1685 * transmitted at same rate. Rate scaling may have been used if first
1686 * frame in this new agg block failed in previous agg block(s).
1687 *
1688 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
1689 * block-ack has not been received by the time the 4965 device records
1690 * this status.
1691 * This status relates to reasons the tx might have been blocked or aborted
1692 * within the sending station (this 4965 device), rather than whether it was
1693 * received successfully by the destination station.
1694 */
/* One per-frame status entry in iwl4965_tx_resp.u.agg_status[] */
struct agg_tx_status {
	__le16 status;		/* AGG_TX_STATE_* + try count, see bit fields
				 * documented on iwl4965_tx_resp */
	__le16 sequence;	/* sequence # of this frame's Tx command */
} __packed;
1699
struct iwl4965_tx_resp {
	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
	u8 bt_kill_count;	/* # blocked by bluetooth (unused for agg) */
	u8 failure_rts;		/* # failures due to unsuccessful RTS */
	u8 failure_frame;	/* # failures due to no ACK (unused for agg) */

	/* For non-agg:  Rate at which frame was successful.
	 * For agg:  Rate at which all frames were transmitted. */
	__le32 rate_n_flags;	/* RATE_MCS_* */

	/* For non-agg:  RTS + CTS + frame tx attempts time + ACK.
	 * For agg:  RTS + CTS + aggregation tx time + block-ack time. */
	__le16 wireless_media_time;	/* uSecs */

	__le16 reserved;
	__le32 pa_power1;	/* RF power amplifier measurement (not used) */
	__le32 pa_power2;	/* RF power amplifier measurement (not used) */

	/*
	 * For non-agg:  frame status TX_STATUS_*
	 * For agg:  status of 1st frame, AGG_TX_STATE_*; other frame status
	 *           fields follow this one, up to frame_count.
	 *           Bit fields:
	 *           11- 0:  AGG_TX_STATE_* status code
	 *           15-12:  Retry count for 1st frame in aggregation (retries
	 *                   occur if tx failed for this frame when it was a
	 *                   member of a previous aggregation block).  If rate
	 *                   scaling is used, retry count indicates the rate
	 *                   table entry used for all frames in the new agg.
	 *           31-16:  Sequence # for this frame's Tx cmd (not SSN!)
	 */
	union {
		__le32 status;
		struct agg_tx_status agg_status[0]; /* for each agg frame */
	} u;
} __packed;
1736
1737/*
1738 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1739 *
1740 * Reports Block-Acknowledge from recipient station
1741 */
1742struct iwl_compressed_ba_resp {
1743 __le32 sta_addr_lo32;
1744 __le16 sta_addr_hi16;
1745 __le16 reserved;
1746
1747 /* Index of recipient (BA-sending) station in uCode's station table */
1748 u8 sta_id;
1749 u8 tid;
1750 __le16 seq_ctl;
1751 __le64 bitmap;
1752 __le16 scd_flow;
1753 __le16 scd_ssn;
1754} __packed;
1755
1756/*
1757 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
1758 *
1759 * See details under "TXPOWER" in iwl-4965-hw.h.
1760 */
1761
/* 3945 format of REPLY_TX_PWR_TABLE_CMD */
struct iwl3945_txpowertable_cmd {
	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
	u8 reserved;
	__le16 channel;		/* channel this table applies to */
	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
} __packed;
1768
/* 4965 format of REPLY_TX_PWR_TABLE_CMD */
struct iwl4965_txpowertable_cmd {
	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
	u8 reserved;
	__le16 channel;		/* channel this table applies to */
	struct iwl4965_tx_power_db tx_power;
} __packed;
1775
1776
1777/**
1778 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
1779 *
1780 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
1781 *
1782 * NOTE: The table of rates passed to the uCode via the
1783 * RATE_SCALE command sets up the corresponding order of
1784 * rates used for all related commands, including rate
1785 * masks, etc.
1786 *
1787 * For example, if you set 9MB (PLCP 0x0f) as the first
1788 * rate in the rate table, the bit mask for that rate
1789 * when passed through ofdm_basic_rates on the REPLY_RXON
1790 * command would be bit 0 (1 << 0)
1791 */
/* One entry of the 3945 rate-scaling table (see comment above) */
struct iwl3945_rate_scaling_info {
	__le16 rate_n_flags;	/* rate for this entry */
	u8 try_cnt;		/* # attempts at this rate */
	u8 next_rate_index;	/* table index of next rate to try */
} __packed;
1797
/* REPLY_RATE_SCALE command body (3945 only) */
struct iwl3945_rate_scaling_cmd {
	u8 table_id;
	u8 reserved[3];
	struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
} __packed;
1803
1804
/* RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK	(1 << 0)

/* # of EDCA prioritized tx fifos */
#define LINK_QUAL_AC_NUM AC_NUM

/* # entries in rate scale table to support Tx retries */
#define LINK_QUAL_MAX_RETRY_NUM 16

/* Tx antenna selection values */
#define LINK_QUAL_ANT_A_MSK	(1 << 0)
#define LINK_QUAL_ANT_B_MSK	(1 << 1)
#define LINK_QUAL_ANT_MSK	(LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1818
1819
1820/**
1821 * struct iwl_link_qual_general_params
1822 *
1823 * Used in REPLY_TX_LINK_QUALITY_CMD
1824 */
struct iwl_link_qual_general_params {
	u8 flags;		/* LINK_QUAL_FLAGS_* */

	/* No entries at or above this (driver chosen) index contain MIMO */
	u8 mimo_delimiter;

	/* Best single antenna to use for single stream (legacy, SISO). */
	u8 single_stream_ant_msk;	/* LINK_QUAL_ANT_* */

	/* Best antennas to use for MIMO (unused for 4965, assumes both). */
	u8 dual_stream_ant_msk;		/* LINK_QUAL_ANT_* */

	/*
	 * If driver needs to use different initial rates for different
	 * EDCA QOS access categories (as implemented by tx fifos 0-3),
	 * this table will set that up, by indicating the indexes in the
	 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
	 * Otherwise, driver should set all entries to 0.
	 *
	 * Entry usage:
	 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
	 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
	 */
	u8 start_rate_index[LINK_QUAL_AC_NUM];
} __packed;
1850
/* Defaults and bounds for the iwl_link_qual_agg_params fields below */
#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN	(100)

#define LINK_QUAL_AGG_DISABLE_START_DEF	(3)
#define LINK_QUAL_AGG_DISABLE_START_MAX	(255)
#define LINK_QUAL_AGG_DISABLE_START_MIN	(0)

#define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(31)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
1862
1863/**
1864 * struct iwl_link_qual_agg_params
1865 *
1866 * Used in REPLY_TX_LINK_QUALITY_CMD
1867 */
struct iwl_link_qual_agg_params {

	/*
	 * Maximum number of uSec in aggregation
	 * (LINK_QUAL_AGG_TIME_LIMIT_MIN/MAX bound it).
	 * default set to 4000 (4 milliseconds) if not configured in .cfg
	 */
	__le16 agg_time_limit;

	/*
	 * Number of Tx retries allowed for a frame, before that frame will
	 * no longer be considered for the start of an aggregation sequence
	 * (scheduler will then try to tx it as single frame).
	 * Driver should set this to 3.
	 */
	u8 agg_dis_start_th;

	/*
	 * Maximum number of frames in aggregation.
	 * 0 = no limit (default).  1 = no aggregation.
	 * Other values = max # frames in aggregation.
	 */
	u8 agg_frame_cnt_limit;

	__le32 reserved;
} __packed;
1893
1894/*
1895 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1896 *
1897 * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
1898 *
1899 * Each station in the 4965 device's internal station table has its own table
1900 * of 16
1901 * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
1902 * an ACK is not received. This command replaces the entire table for
1903 * one station.
1904 *
1905 * NOTE: Station must already be in 4965 device's station table.
1906 * Use REPLY_ADD_STA.
1907 *
1908 * The rate scaling procedures described below work well. Of course, other
1909 * procedures are possible, and may work better for particular environments.
1910 *
1911 *
1912 * FILLING THE RATE TABLE
1913 *
1914 * Given a particular initial rate and mode, as determined by the rate
1915 * scaling algorithm described below, the Linux driver uses the following
1916 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1917 * Link Quality command:
1918 *
1919 *
1920 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1921 * a) Use this same initial rate for first 3 entries.
1922 * b) Find next lower available rate using same mode (SISO or MIMO),
1923 * use for next 3 entries. If no lower rate available, switch to
1924 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1925 * c) If using MIMO, set command's mimo_delimiter to number of entries
1926 * using MIMO (3 or 6).
1927 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1928 * no MIMO, no short guard interval), at the next lower bit rate
1929 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1930 * legacy procedure for remaining table entries.
1931 *
1932 * 2) If using legacy initial rate:
1933 * a) Use the initial rate for only one entry.
1934 * b) For each following entry, reduce the rate to next lower available
1935 * rate, until reaching the lowest available rate.
1936 * c) When reducing rate, also switch antenna selection.
1937 * d) Once lowest available rate is reached, repeat this rate until
1938 * rate table is filled (16 entries), switching antenna each entry.
1939 *
1940 *
1941 * ACCUMULATING HISTORY
1942 *
1943 * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
1944 * uses two sets of frame Tx success history: One for the current/active
1945 * modulation mode, and one for a speculative/search mode that is being
1946 * attempted. If the speculative mode turns out to be more effective (i.e.
1947 * actual transfer rate is better), then the driver continues to use the
1948 * speculative mode as the new current active mode.
1949 *
1950 * Each history set contains, separately for each possible rate, data for a
1951 * sliding window of the 62 most recent tx attempts at that rate. The data
1952 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1953 * and attempted frames, from which the driver can additionally calculate a
1954 * success ratio (success / attempted) and number of failures
1955 * (attempted - success), and control the size of the window (attempted).
1956 * The driver uses the bit map to remove successes from the success sum, as
1957 * the oldest tx attempts fall out of the window.
1958 *
1959 * When the 4965 device makes multiple tx attempts for a given frame, each
1960 * attempt might be at a different rate, and have different modulation
1961 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1962 * up in the rate scaling table in the Link Quality command. The driver must
1963 * determine which rate table entry was used for each tx attempt, to determine
1964 * which rate-specific history to update, and record only those attempts that
1965 * match the modulation characteristics of the history set.
1966 *
1967 * When using block-ack (aggregation), all frames are transmitted at the same
1968 * rate, since there is no per-attempt acknowledgment from the destination
1969 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
1970 * rate_n_flags field. After receiving a block-ack, the driver can update
1971 * history for the entire block all at once.
1972 *
1973 *
1974 * FINDING BEST STARTING RATE:
1975 *
1976 * When working with a selected initial modulation mode (see below), the
1977 * driver attempts to find a best initial rate. The initial rate is the
1978 * first entry in the Link Quality command's rate table.
1979 *
1980 * 1) Calculate actual throughput (success ratio * expected throughput, see
1981 * table below) for current initial rate. Do this only if enough frames
1982 * have been attempted to make the value meaningful: at least 6 failed
1983 * tx attempts, or at least 8 successes. If not enough, don't try rate
1984 * scaling yet.
1985 *
1986 * 2) Find available rates adjacent to current initial rate. Available means:
1987 * a) supported by hardware &&
1988 * b) supported by association &&
1989 * c) within any constraints selected by user
1990 *
1991 * 3) Gather measured throughputs for adjacent rates. These might not have
1992 * enough history to calculate a throughput. That's okay, we might try
1993 * using one of them anyway!
1994 *
1995 * 4) Try decreasing rate if, for current rate:
1996 * a) success ratio is < 15% ||
1997 * b) lower adjacent rate has better measured throughput ||
1998 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1999 *
2000 * As a sanity check, if decrease was determined above, leave rate
2001 * unchanged if:
2002 * a) lower rate unavailable
2003 * b) success ratio at current rate > 85% (very good)
2004 * c) current measured throughput is better than expected throughput
2005 * of lower rate (under perfect 100% tx conditions, see table below)
2006 *
 * 5)  Try increasing rate if, for current rate:
 *     a)  success ratio is < 15% ||
 *     b)  both adjacent rates' throughputs are unmeasured (try it!) ||
 *     c)  higher adjacent rate has better measured throughput ||
 *     d)  lower adjacent rate has worse throughput, and higher is unmeasured
2012 *
2013 * As a sanity check, if increase was determined above, leave rate
2014 * unchanged if:
2015 * a) success ratio at current rate < 70%. This is not particularly
2016 * good performance; higher rate is sure to have poorer success.
2017 *
2018 * 6) Re-evaluate the rate after each tx frame. If working with block-
2019 * acknowledge, history and statistics may be calculated for the entire
2020 * block (including prior history that fits within the history windows),
2021 * before re-evaluation.
2022 *
2023 * FINDING BEST STARTING MODULATION MODE:
2024 *
2025 * After working with a modulation mode for a "while" (and doing rate scaling),
2026 * the driver searches for a new initial mode in an attempt to improve
2027 * throughput. The "while" is measured by numbers of attempted frames:
2028 *
2029 * For legacy mode, search for new mode after:
2030 * 480 successful frames, or 160 failed frames
2031 * For high-throughput modes (SISO or MIMO), search for new mode after:
2032 * 4500 successful frames, or 400 failed frames
2033 *
2034 * Mode switch possibilities are (3 for each mode):
2035 *
2036 * For legacy:
2037 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
2038 * For SISO:
2039 * Change antenna, try MIMO, try shortened guard interval (SGI)
2040 * For MIMO:
2041 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
2042 *
2043 * When trying a new mode, use the same bit rate as the old/current mode when
2044 * trying antenna switches and shortened guard interval. When switching to
2045 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
2046 * for which the expected throughput (under perfect conditions) is about the
2047 * same or slightly better than the actual measured throughput delivered by
2048 * the old/current mode.
2049 *
2050 * Actual throughput can be estimated by multiplying the expected throughput
2051 * by the success ratio (successful / attempted tx frames). Frame size is
2052 * not considered in this calculation; it assumes that frame size will average
2053 * out to be fairly consistent over several samples. The following are
2054 * metric values for expected throughput assuming 100% success ratio.
2055 * Only G band has support for CCK rates:
2056 *
2057 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
2058 *
2059 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
2060 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
2061 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
2062 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
2063 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
2064 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
2065 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
2066 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
2067 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
2068 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
2069 *
2070 * After the new mode has been tried for a short while (minimum of 6 failed
2071 * frames or 8 successful frames), compare success ratio and actual throughput
2072 * estimate of the new mode with the old. If either is better with the new
2073 * mode, continue to use the new mode.
2074 *
2075 * Continue comparing modes until all 3 possibilities have been tried.
2076 * If moving from legacy to HT, try all 3 possibilities from the new HT
2077 * mode. After trying all 3, a best mode is found. Continue to use this mode
2078 * for the longer "while" described above (e.g. 480 successful frames for
2079 * legacy), and then repeat the search process.
2080 *
2081 */
/*
 * Link Quality command payload: per-station rate-scaling/retry setup.
 * rs_table[] holds the sequence of rates the device steps through when
 * retrying Tx; the first entry is the initial rate chosen per the
 * rate-scaling discussion above.
 */
struct iwl_link_quality_cmd {

	/* Index of destination/recipient station in uCode's station table */
	u8 sta_id;
	u8 reserved1;
	__le16 control;		/* not used */
	struct iwl_link_qual_general_params general_params;
	struct iwl_link_qual_agg_params agg_params;

	/*
	 * Rate info; when using rate-scaling, Tx command's initial_rate_index
	 * specifies 1st Tx rate attempted, via index into this table.
	 * 4965 devices works its way through table when retrying Tx.
	 */
	struct {
		__le32 rate_n_flags;	/* RATE_MCS_*, IWL_RATE_* */
	} rs_table[LINK_QUAL_MAX_RETRY_NUM];
	__le32 reserved2;
} __packed;
2101
2102/*
2103 * BT configuration enable flags:
2104 * bit 0 - 1: BT channel announcement enabled
2105 * 0: disable
2106 * bit 1 - 1: priority of BT device enabled
2107 * 0: disable
2108 */
/* Values/bits for iwl_bt_cmd.flags; bit meanings documented above */
#define BT_COEX_DISABLE			(0x0)
#define BT_ENABLE_CHANNEL_ANNOUNCE	BIT(0)
#define BT_ENABLE_PRIORITY		BIT(1)

/* Both coexistence features enabled */
#define BT_COEX_ENABLE	(BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)

/* Default iwl_bt_cmd.lead_time (0x1E = 30; units not documented here) */
#define BT_LEAD_TIME_DEF	(0x1E)

/* Default iwl_bt_cmd.max_kill */
#define BT_MAX_KILL_DEF		(0x5)
2118
2119/*
2120 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
2121 *
2122 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
2123 * same platform. Bluetooth device alerts wireless device when it will Tx;
2124 * wireless device can delay or kill its own Tx to accommodate.
2125 */
struct iwl_bt_cmd {
	u8 flags;		/* BT_COEX_DISABLE, or BT_ENABLE_* bits */
	u8 lead_time;		/* typically BT_LEAD_TIME_DEF */
	u8 max_kill;		/* typically BT_MAX_KILL_DEF */
	u8 reserved;
	/* NOTE(review): kill mask semantics (which acks/cts get killed for
	 * BT activity) are not documented here -- confirm in driver code */
	__le32 kill_ack_mask;
	__le32 kill_cts_mask;
} __packed;
2134
2135
2136/******************************************************************************
2137 * (6)
2138 * Spectrum Management (802.11h) Commands, Responses, Notifications:
2139 *
2140 *****************************************************************************/
2141
2142/*
2143 * Spectrum Management
2144 */
/*
 * Combination of RXON filter bits; presumably used as the filter_flags
 * value for spectrum measurement (see struct iwl_spectrum_cmd) -- confirm
 * against the caller.
 */
#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK         | \
				 RXON_FILTER_CTL2HOST_MSK        | \
				 RXON_FILTER_ACCEPT_GRP_MSK      | \
				 RXON_FILTER_DIS_DECRYPT_MSK     | \
				 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
				 RXON_FILTER_ASSOC_MSK           | \
				 RXON_FILTER_BCON_AWARE_MSK)

/* One entry in the REPLY_SPECTRUM_MEASUREMENT_CMD channel list */
struct iwl_measure_channel {
	__le32 duration;	/* measurement duration in extended beacon
				 * format */
	u8 channel;		/* channel to measure */
	u8 type;		/* see enum iwl_measure_type */
	__le16 reserved;
} __packed;
2160
2161/*
2162 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
2163 */
struct iwl_spectrum_cmd {
	__le16 len;		/* number of bytes starting from token */
	u8 token;		/* token id */
	u8 id;			/* measurement id -- 0 or 1 */
	u8 origin;		/* 0 = TGh, 1 = other, 2 = TGk */
	u8 periodic;		/* 1 = periodic */
	__le16 path_loss_timeout;
	__le32 start_time;	/* start time in extended beacon format */
	__le32 reserved2;
	__le32 flags;		/* rxon flags */
	__le32 filter_flags;	/* rxon filter flags */
	__le16 channel_count;	/* minimum 1, maximum 10 */
	__le16 reserved3;
	/* channels[0..channel_count-1] are meaningful */
	struct iwl_measure_channel channels[10];
} __packed;
2179
2180/*
2181 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
2182 */
struct iwl_spectrum_resp {
	u8 token;
	u8 id;			/* id of the prior command replaced, or 0xff */
	__le16 status;		/* 0 - command will be handled
				 * 1 - cannot handle (conflicts with another
				 *     measurement) */
} __packed;

/* Start/stop indication; see iwl_spectrum_notification "state" field */
enum iwl_measurement_state {
	IWL_MEASUREMENT_START = 0,
	IWL_MEASUREMENT_STOP = 1,
};

/* Result codes; see iwl_spectrum_notification "status" field */
enum iwl_measurement_status {
	IWL_MEASUREMENT_OK = 0,
	IWL_MEASUREMENT_CONCURRENT = 1,
	IWL_MEASUREMENT_CSA_CONFLICT = 2,
	IWL_MEASUREMENT_TGH_CONFLICT = 3,
	/* 4-5 reserved */
	IWL_MEASUREMENT_STOPPED = 6,
	IWL_MEASUREMENT_TIMEOUT = 7,
	IWL_MEASUREMENT_PERIODIC_FAILED = 8,
};
2206
#define NUM_ELEMENTS_IN_HISTOGRAM 8

/* Histogram results (see IWL_MEASURE_HISTOGRAM_* request types below) */
struct iwl_measurement_histogram {
	__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 0.8usec counts */
	__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 1usec counts */
} __packed;

/* clear channel availability counters */
struct iwl_measurement_cca_counters {
	__le32 ofdm;
	__le32 cck;
} __packed;

/* Bit flags for iwl_measure_channel.type: which measurements to run */
enum iwl_measure_type {
	IWL_MEASURE_BASIC = (1 << 0),
	IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
	IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
	IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
	IWL_MEASURE_FRAME = (1 << 4),
	/* bits 5:6 are reserved */
	IWL_MEASURE_IDLE = (1 << 7),
};
2229
2230/*
2231 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
2232 */
struct iwl_spectrum_notification {
	u8 id;			/* measurement id -- 0 or 1 */
	u8 token;
	u8 channel_index;	/* index in measurement channel list */
	u8 state;		/* 0 - start, 1 - stop */
	__le32 start_time;	/* lower 32-bits of TSF */
	u8 band;		/* 0 - 5.2GHz, 1 - 2.4GHz */
	u8 channel;
	u8 type;		/* see enum iwl_measurement_type */
	u8 reserved1;
	/* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
	 * valid if applicable for measurement type requested. */
	__le32 cca_ofdm;	/* cca fraction time in 40Mhz clock periods */
	__le32 cca_cck;		/* cca fraction time in 44Mhz clock periods */
	__le32 cca_time;	/* channel load time in usecs */
	u8 basic_type;		/* 0 - bss, 1 - ofdm preamble, 2 -
				 * unidentified */
	u8 reserved2[3];
	struct iwl_measurement_histogram histogram;
	__le32 stop_time;	/* lower 32-bits of TSF */
	__le32 status;		/* see iwl_measurement_status */
} __packed;
2255
2256/******************************************************************************
2257 * (7)
2258 * Power Management Commands, Responses, Notifications:
2259 *
2260 *****************************************************************************/
2261
2262/**
2263 * struct iwl_powertable_cmd - Power Table Command
2264 * @flags: See below:
2265 *
2266 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
2267 *
2268 * PM allow:
2269 * bit 0 - '0' Driver not allow power management
2270 * '1' Driver allow PM (use rest of parameters)
2271 *
2272 * uCode send sleep notifications:
2273 * bit 1 - '0' Don't send sleep notification
2274 * '1' send sleep notification (SEND_PM_NOTIFICATION)
2275 *
2276 * Sleep over DTIM
 *	bit 2 - '0' PM have to wake up every DTIM
 *		'1' PM could sleep over DTIM till listen Interval.
2279 *
2280 * PCI power managed
2281 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2282 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2283 *
2284 * Fast PD
2285 * bit 4 - '1' Put radio to sleep when receiving frame for others
2286 *
2287 * Force sleep Modes
2288 * bit 31/30- '00' use both mac/xtal sleeps
2289 * '01' force Mac sleep
2290 * '10' force xtal sleep
2291 * '11' Illegal set
2292 *
2293 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2294 * ucode assume sleep over DTIM is allowed and we don't need to wake up
2295 * for every DTIM.
2296 */
/* Number of entries in the sleep_interval[] vector */
#define IWL_POWER_VEC_SIZE 5

#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK	cpu_to_le16(BIT(0))
/* NOTE: same bit 0 as IWL_POWER_DRIVER_ALLOW_SLEEP_MSK -- two names
 * exist for the same flag (presumably per-device-family naming) */
#define IWL_POWER_POWER_SAVE_ENA_MSK		cpu_to_le16(BIT(0))
#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK	cpu_to_le16(BIT(1))
#define IWL_POWER_SLEEP_OVER_DTIM_MSK		cpu_to_le16(BIT(2))
#define IWL_POWER_PCI_PM_MSK			cpu_to_le16(BIT(3))
#define IWL_POWER_FAST_PD			cpu_to_le16(BIT(4))
#define IWL_POWER_BEACON_FILTERING		cpu_to_le16(BIT(5))
#define IWL_POWER_SHADOW_REG_ENA		cpu_to_le16(BIT(6))
#define IWL_POWER_CT_KILL_SET			cpu_to_le16(BIT(7))
2308
/* 3945 POWER_TABLE_CMD payload; flag bits documented above */
struct iwl3945_powertable_cmd {
	__le16 flags;		/* IWL_POWER_* bits */
	u8 reserved[2];
	__le32 rx_data_timeout;
	__le32 tx_data_timeout;
	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
} __packed;

/* 4965/agn POWER_TABLE_CMD payload; same layout with the two reserved
 * bytes named, plus keep_alive_beacons appended */
struct iwl_powertable_cmd {
	__le16 flags;		/* IWL_POWER_* bits */
	u8 keep_alive_seconds;	/* 3945 reserved */
	u8 debug_flags;		/* 3945 reserved */
	__le32 rx_data_timeout;
	__le32 tx_data_timeout;
	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
	__le32 keep_alive_beacons;
} __packed;
2326
2327/*
2328 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
2329 * all devices identical.
2330 */
struct iwl_sleep_notification {
	u8 pm_sleep_mode;	/* see sleep-state enum below */
	u8 pm_wakeup_src;
	__le16 reserved;
	__le32 sleep_time;
	__le32 tsf_low;
	__le32 bcon_timer;
} __packed;

/* Sleep states. all devices identical. */
enum {
	IWL_PM_NO_SLEEP = 0,
	IWL_PM_SLP_MAC = 1,
	IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
	IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
	IWL_PM_SLP_PHY = 4,
	IWL_PM_SLP_REPENT = 5,
	IWL_PM_WAKEUP_BY_TIMER = 6,
	IWL_PM_WAKEUP_BY_DRIVER = 7,
	IWL_PM_WAKEUP_BY_RFKILL = 8,
	/* 3 reserved */
	IWL_PM_NUM_OF_MODES = 12,
};
2354
2355/*
2356 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2357 */
struct iwl_card_state_notif {
	__le32 flags;	/* *_CARD_DISABLED bits below */
} __packed;

/* Bits for iwl_card_state_notif.flags */
#define HW_CARD_DISABLED   0x01
#define SW_CARD_DISABLED   0x02
#define CT_CARD_DISABLED   0x04
#define RXON_CARD_DISABLED 0x10

/*
 * Critical-temperature (thermal kill) thresholds.
 * NOTE(review): units/encoding of the M and R values are not documented
 * here -- confirm against the driver code that fills this in.
 */
struct iwl_ct_kill_config {
	__le32   reserved;
	__le32   critical_temperature_M;
	__le32   critical_temperature_R;
} __packed;
2372
2373/******************************************************************************
2374 * (8)
2375 * Scan Commands, Responses, Notifications:
2376 *
2377 *****************************************************************************/
2378
/* Scan channel type: bit 0 set = active scan, clear = passive */
#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
#define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)
2381
2382/**
2383 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
2384 *
2385 * One for each channel in the scan list.
2386 * Each channel can independently select:
2387 * 1) SSID for directed active scans
2388 * 2) Txpower setting (for rate specified within Tx command)
2389 * 3) How long to stay on-channel (behavior may be modified by quiet_time,
2390 * quiet_plcp_th, good_CRC_th)
2391 *
2392 * To avoid uCode errors, make sure the following are true (see comments
2393 * under struct iwl_scan_cmd about max_out_time and quiet_time):
2394 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2395 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2396 * 2) quiet_time <= active_dwell
2397 * 3) If restricting off-channel time (i.e. max_out_time !=0):
2398 * passive_dwell < max_out_time
2399 * active_dwell < max_out_time
2400 */
struct iwl3945_scan_channel {
	/*
	 * type is defined as:
	 * 0:0 1 = active, 0 = passive
	 * 1:4 SSID direct bit map; if a bit is set, then corresponding
	 *     SSID IE is transmitted in probe request.
	 * 5:7 reserved
	 */
	u8 type;
	u8 channel;	/* band is selected by iwl3945_scan_cmd "flags" field */
	struct iwl3945_tx_power tpc;	/* tx power for this channel */
	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
} __packed;

/* set number of direct probes u8 type: sets bits 1..n (the SSID map) */
#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2418
struct iwl_scan_channel {
	/*
	 * type is defined as:
	 * 0:0 1 = active, 0 = passive
	 * 1:20 SSID direct bit map; if a bit is set, then corresponding
	 *      SSID IE is transmitted in probe request.
	 * 21:31 reserved
	 */
	__le32 type;
	__le16 channel;	/* band is selected by iwl_scan_cmd "flags" field */
	u8 tx_gain;	/* gain for analog radio */
	u8 dsp_atten;	/* gain for DSP */
	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
} __packed;

/* set number of direct probes __le32 type: sets bits 1..n (SSID map) */
#define IWL_SCAN_PROBE_MASK(n)	cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
2437
2438/**
2439 * struct iwl_ssid_ie - directed scan network information element
2440 *
2441 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
2442 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
2443 * each channel may select different ssids from among the 20 (4) entries.
2444 * SSID IEs get transmitted in reverse order of entry.
2445 */
struct iwl_ssid_ie {
	u8 id;		/* information element id */
	u8 len;		/* number of bytes of ssid[] actually used */
	u8 ssid[32];
} __packed;

#define PROBE_OPTION_MAX_3945		4	/* direct_scan[] size, 3945 */
#define PROBE_OPTION_MAX		20	/* direct_scan[] size, agn */
#define TX_CMD_LIFE_TIME_INFINITE	cpu_to_le32(0xFFFFFFFF)
/* good_CRC_th values (passive -> active promotion, see scan cmd below) */
#define IWL_GOOD_CRC_TH_DISABLED	0
#define IWL_GOOD_CRC_TH_DEFAULT		cpu_to_le16(1)
#define IWL_GOOD_CRC_TH_NEVER		cpu_to_le16(0xffff)
#define IWL_MAX_SCAN_SIZE 1024
#define IWL_MAX_CMD_SIZE 4096
2460
2461/*
2462 * REPLY_SCAN_CMD = 0x80 (command)
2463 *
2464 * The hardware scan command is very powerful; the driver can set it up to
2465 * maintain (relatively) normal network traffic while doing a scan in the
2466 * background. The max_out_time and suspend_time control the ratio of how
2467 * long the device stays on an associated network channel ("service channel")
2468 * vs. how long it's away from the service channel, i.e. tuned to other channels
2469 * for scanning.
2470 *
2471 * max_out_time is the max time off-channel (in usec), and suspend_time
2472 * is how long (in "extended beacon" format) that the scan is "suspended"
2473 * after returning to the service channel. That is, suspend_time is the
2474 * time that we stay on the service channel, doing normal work, between
2475 * scan segments. The driver may set these parameters differently to support
2476 * scanning when associated vs. not associated, and light vs. heavy traffic
2477 * loads when associated.
2478 *
 * After receiving this command, the device's scan engine does the following:
2480 *
2481 * 1) Sends SCAN_START notification to driver
2482 * 2) Checks to see if it has time to do scan for one channel
2483 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2484 * to tell AP that we're going off-channel
2485 * 4) Tunes to first channel in scan list, does active or passive scan
2486 * 5) Sends SCAN_RESULT notification to driver
2487 * 6) Checks to see if it has time to do scan on *next* channel in list
2488 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2489 * before max_out_time expires
2490 * 8) Returns to service channel
2491 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2492 * 10) Stays on service channel until suspend_time expires
2493 * 11) Repeats entire process 2-10 until list is complete
2494 * 12) Sends SCAN_COMPLETE notification
2495 *
2496 * For fast, efficient scans, the scan command also has support for staying on
2497 * a channel for just a short time, if doing active scanning and getting no
2498 * responses to the transmitted probe request. This time is controlled by
2499 * quiet_time, and the number of received packets below which a channel is
2500 * considered "quiet" is controlled by quiet_plcp_threshold.
2501 *
2502 * For active scanning on channels that have regulatory restrictions against
2503 * blindly transmitting, the scan can listen before transmitting, to make sure
2504 * that there is already legitimate activity on the channel. If enough
2505 * packets are cleanly received on the channel (controlled by good_CRC_th,
2506 * typical value 1), the scan engine starts transmitting probe requests.
2507 *
2508 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2509 *
2510 * To avoid uCode errors, see timing restrictions described under
2511 * struct iwl_scan_channel.
2512 */
2513
/* 3945 REPLY_SCAN_CMD payload; see the long description above */
struct iwl3945_scan_cmd {
	__le16 len;		/* total command length in bytes */
	u8 reserved0;
	u8 channel_count;	/* # channels in channel list */
	__le16 quiet_time;	/* dwell only this # millisecs on quiet channel
				 * (only for active scan) */
	__le16 quiet_plcp_th;	/* quiet chnl is < this # pkts (typ. 1) */
	__le16 good_CRC_th;	/* passive -> active promotion threshold */
	__le16 reserved1;
	__le32 max_out_time;	/* max usec to be away from associated (service)
				 * channel */
	__le32 suspend_time;	/* pause scan this long (in "extended beacon
				 * format") when returning to service channel:
				 * 3945; 31:24 # beacons, 19:0 additional usec,
				 * 4965; 31:22 # beacons, 21:0 additional usec.
				 */
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */

	/* For active scans (set to all-0s for passive scans).
	 * Does not include payload.  Must specify Tx rate; no rate scaling. */
	struct iwl3945_tx_cmd tx_cmd;

	/* For directed active scans (set to all-0s otherwise) */
	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];

	/*
	 * Probe request frame, followed by channel list.
	 *
	 * Size of probe request frame is specified by byte count in tx_cmd.
	 * Channel list follows immediately after probe request frame.
	 * Number of channels in list is specified by channel_count.
	 * Each channel in list is of type:
	 *
	 * struct iwl3945_scan_channel channels[0];
	 *
	 * NOTE:  Only one band of channels can be scanned per pass.  You
	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
	 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
	 * before requesting another scan.
	 */
	u8 data[0];	/* zero-length array: variable-length trailer */
} __packed;
2557
/* 4965/agn REPLY_SCAN_CMD payload; see the long description above */
struct iwl_scan_cmd {
	__le16 len;		/* total command length in bytes */
	u8 reserved0;
	u8 channel_count;	/* # channels in channel list */
	__le16 quiet_time;	/* dwell only this # millisecs on quiet channel
				 * (only for active scan) */
	__le16 quiet_plcp_th;	/* quiet chnl is < this # pkts (typ. 1) */
	__le16 good_CRC_th;	/* passive -> active promotion threshold */
	__le16 rx_chain;	/* RXON_RX_CHAIN_* */
	__le32 max_out_time;	/* max usec to be away from associated (service)
				 * channel */
	__le32 suspend_time;	/* pause scan this long (in "extended beacon
				 * format") when returning to service chnl:
				 * 3945; 31:24 # beacons, 19:0 additional usec,
				 * 4965; 31:22 # beacons, 21:0 additional usec.
				 */
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */

	/* For active scans (set to all-0s for passive scans).
	 * Does not include payload.  Must specify Tx rate; no rate scaling. */
	struct iwl_tx_cmd tx_cmd;

	/* For directed active scans (set to all-0s otherwise) */
	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];

	/*
	 * Probe request frame, followed by channel list.
	 *
	 * Size of probe request frame is specified by byte count in tx_cmd.
	 * Channel list follows immediately after probe request frame.
	 * Number of channels in list is specified by channel_count.
	 * Each channel in list is of type:
	 *
	 * struct iwl_scan_channel channels[0];
	 *
	 * NOTE:  Only one band of channels can be scanned per pass.  You
	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
	 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
	 * before requesting another scan.
	 */
	u8 data[0];	/* zero-length array: variable-length trailer */
} __packed;
2601
/* Can abort will notify by complete notification with abort status. */
#define CAN_ABORT_STATUS	cpu_to_le32(0x1)
/* complete notification statuses */
#define ABORT_STATUS		0x2

/*
 * REPLY_SCAN_CMD = 0x80 (response)
 */
struct iwl_scanreq_notification {
	__le32 status;		/* 1: okay, 2: cannot fulfill request */
} __packed;
2613
2614/*
2615 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
2616 */
struct iwl_scanstart_notification {
	__le32 tsf_low;		/* lower 32 bits of TSF at scan start */
	__le32 tsf_high;
	__le32 beacon_timer;
	u8 channel;
	u8 band;
	u8 reserved[2];
	__le32 status;
} __packed;
2626
/*
 * Scan/measurement ownership status values.
 * Fix: the original defines carried stray trailing semicolons, which
 * would inject a ';' into any expression using them (e.g.
 * "status & SCAN_OWNER_STATUS" failed to compile). Values unchanged.
 */
#define SCAN_OWNER_STATUS 0x1
#define MEASURE_OWNER_STATUS 0x2
2629
/* Values for iwl_scanresults_notification.probe_status */
#define IWL_PROBE_STATUS_OK		0
#define IWL_PROBE_STATUS_TX_FAILED	BIT(0)
/* error statuses combined with TX_FAILED */
#define IWL_PROBE_STATUS_FAIL_TTL	BIT(1)
#define IWL_PROBE_STATUS_FAIL_BT	BIT(2)

#define NUMBER_OF_STATISTICS 1	/* first __le32 is good CRC */
/*
 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
 */
struct iwl_scanresults_notification {
	u8 channel;
	u8 band;
	u8 probe_status;	/* IWL_PROBE_STATUS_* */
	u8 num_probe_not_sent;	/* not enough time to send */
	__le32 tsf_low;
	__le32 tsf_high;
	__le32 statistics[NUMBER_OF_STATISTICS];
} __packed;
2649
2650/*
2651 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
2652 */
struct iwl_scancomplete_notification {
	u8 scanned_channels;	/* how many channels were actually scanned */
	u8 status;		/* e.g. ABORT_STATUS */
	u8 last_channel;
	__le32 tsf_low;		/* lower 32 bits of TSF at completion */
	__le32 tsf_high;
} __packed;
2660
2661
2662/******************************************************************************
2663 * (9)
2664 * IBSS/AP Commands and Notifications:
2665 *
2666 *****************************************************************************/
2667
/* Values for beacon notification ibss_mgr_status */
enum iwl_ibss_manager {
	IWL_NOT_IBSS_MANAGER = 0,
	IWL_IBSS_MANAGER = 1,
};
2672
2673/*
2674 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2675 */
2676
/* 3945 BEACON_NOTIFICATION payload */
struct iwl3945_beacon_notif {
	struct iwl3945_tx_resp beacon_notify_hdr;
	__le32 low_tsf;
	__le32 high_tsf;
	__le32 ibss_mgr_status;	/* see enum iwl_ibss_manager */
} __packed;

/* 4965 BEACON_NOTIFICATION payload; same fields, 4965 tx_resp header */
struct iwl4965_beacon_notif {
	struct iwl4965_tx_resp beacon_notify_hdr;
	__le32 low_tsf;
	__le32 high_tsf;
	__le32 ibss_mgr_status;	/* see enum iwl_ibss_manager */
} __packed;
2690
2691/*
2692 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2693 */
2694
/* 3945 REPLY_TX_BEACON payload */
struct iwl3945_tx_beacon_cmd {
	struct iwl3945_tx_cmd tx;	/* Tx parameters for the beacon */
	__le16 tim_idx;			/* byte offset of TIM element */
	u8 tim_size;
	u8 reserved1;
	struct ieee80211_hdr frame[0];	/* beacon frame */
} __packed;

/* 4965/agn REPLY_TX_BEACON payload */
struct iwl_tx_beacon_cmd {
	struct iwl_tx_cmd tx;		/* Tx parameters for the beacon */
	__le16 tim_idx;			/* byte offset of TIM element */
	u8 tim_size;
	u8 reserved1;
	struct ieee80211_hdr frame[0];	/* beacon frame */
} __packed;
2710
2711/******************************************************************************
2712 * (10)
2713 * Statistics Commands and Notifications:
2714 *
2715 *****************************************************************************/
2716
#define IWL_TEMP_CONVERT 260

#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12

/* Used for passing to driver number of successes and failures per rate */
struct rate_histogram {
	union {
		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
	} success;
	union {
		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
	} failed;
} __packed;
2736
2737/* statistics command response */
2738
/* 3945 per-modulation (OFDM/CCK) Rx PHY counters */
struct iwl39_statistics_rx_phy {
	__le32 ina_cnt;
	__le32 fina_cnt;
	__le32 plcp_err;
	__le32 crc32_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 false_alarm_cnt;
	__le32 fina_sync_err_cnt;
	__le32 sfd_timeout;
	__le32 fina_timeout;
	__le32 unresponded_rts;
	__le32 rxe_frame_limit_overrun;
	__le32 sent_ack_cnt;
	__le32 sent_cts_cnt;
} __packed;
2756
/* 3945 Rx counters not tied to a modulation type */
struct iwl39_statistics_rx_non_phy {
	__le32 bogus_cts;	/* CTS received when not expecting CTS */
	__le32 bogus_ack;	/* ACK received when not expecting ACK */
	__le32 non_bssid_frames;	/* number of frames with BSSID that
					 * doesn't belong to the STA BSSID */
	__le32 filtered_frames;	/* count frames that were dumped in the
				 * filtering process */
	__le32 non_channel_beacons;	/* beacons with our bss id but not on
					 * our serving channel */
} __packed;

/* 3945 Rx statistics aggregate */
struct iwl39_statistics_rx {
	struct iwl39_statistics_rx_phy ofdm;
	struct iwl39_statistics_rx_phy cck;
	struct iwl39_statistics_rx_non_phy general;
} __packed;
2773
/* 3945 Tx counters */
struct iwl39_statistics_tx {
	__le32 preamble_cnt;
	__le32 rx_detected_cnt;
	__le32 bt_prio_defer_cnt;	/* Tx deferred for BT priority */
	__le32 bt_prio_kill_cnt;	/* Tx killed for BT priority */
	__le32 few_bytes_cnt;
	__le32 cts_timeout;
	__le32 ack_timeout;
	__le32 expected_ack_cnt;
	__le32 actual_ack_cnt;
} __packed;

/* Debug counters (shared by 3945 and agn general statistics) */
struct statistics_dbg {
	__le32 burst_check;
	__le32 burst_count;
	__le32 wait_for_silence_timeout_cnt;
	__le32 reserved[3];
} __packed;
2792
/* 3945 antenna diversity counters */
struct iwl39_statistics_div {
	__le32 tx_on_a;		/* Tx time on antenna A */
	__le32 tx_on_b;		/* Tx time on antenna B */
	__le32 exec_time;
	__le32 probe_time;
} __packed;

/* 3945 general statistics */
struct iwl39_statistics_general {
	__le32 temperature;	/* radio temperature */
	struct statistics_dbg dbg;
	__le32 sleep_time;
	__le32 slots_out;
	__le32 slots_idle;
	__le32 ttl_timestamp;
	struct iwl39_statistics_div div;
} __packed;
2809
/* agn per-modulation (OFDM/CCK) Rx PHY counters; extends the 3945 set */
struct statistics_rx_phy {
	__le32 ina_cnt;
	__le32 fina_cnt;
	__le32 plcp_err;
	__le32 crc32_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 false_alarm_cnt;
	__le32 fina_sync_err_cnt;
	__le32 sfd_timeout;
	__le32 fina_timeout;
	__le32 unresponded_rts;
	__le32 rxe_frame_limit_overrun;
	__le32 sent_ack_cnt;
	__le32 sent_cts_cnt;
	__le32 sent_ba_rsp_cnt;		/* block-ack responses sent */
	__le32 dsp_self_kill;
	__le32 mh_format_err;
	__le32 re_acq_main_rssi_sum;
	__le32 reserved3;
} __packed;

/* Rx counters specific to HT (OFDM-HT) reception */
struct statistics_rx_ht_phy {
	__le32 plcp_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 crc32_err;
	__le32 mh_format_err;
	__le32 agg_crc32_good;		/* aggregated (A-MPDU) counters */
	__le32 agg_mpdu_cnt;
	__le32 agg_cnt;
	__le32 unsupport_mcs;
} __packed;
2845
/* Value of interference_data_flag when interference data is valid */
#define INTERFERENCE_DATA_AVAILABLE	cpu_to_le32(1)

struct statistics_rx_non_phy {
	__le32 bogus_cts;	/* CTS received when not expecting CTS */
	__le32 bogus_ack;	/* ACK received when not expecting ACK */
	__le32 non_bssid_frames;	/* number of frames with BSSID that
					 * doesn't belong to the STA BSSID */
	__le32 filtered_frames;	/* count frames that were dumped in the
				 * filtering process */
	__le32 non_channel_beacons;	/* beacons with our bss id but not on
					 * our serving channel */
	__le32 channel_beacons;		/* beacons with our bss id and in our
					 * serving channel */
	__le32 num_missed_bcon;		/* number of missed beacons */
	__le32 adc_rx_saturation_time;	/* count in 0.8us units the time the
					 * ADC was in saturation */
	__le32 ina_detection_search_time;/* total time (in 0.8us) searched
					  * for INA */
	__le32 beacon_silence_rssi_a;	/* RSSI silence after beacon frame */
	__le32 beacon_silence_rssi_b;	/* RSSI silence after beacon frame */
	__le32 beacon_silence_rssi_c;	/* RSSI silence after beacon frame */
	__le32 interference_data_flag;	/* flag for interference data
					 * availability. 1 when data is
					 * available. */
	__le32 channel_load;		/* counts RX Enable time in uSec */
	__le32 dsp_false_alarms;	/* DSP false alarm (both OFDM
					 * and CCK) counter */
	__le32 beacon_rssi_a;
	__le32 beacon_rssi_b;
	__le32 beacon_rssi_c;
	__le32 beacon_energy_a;
	__le32 beacon_energy_b;
	__le32 beacon_energy_c;
} __packed;

/* agn Rx statistics aggregate */
struct statistics_rx {
	struct statistics_rx_phy ofdm;
	struct statistics_rx_phy cck;
	struct statistics_rx_non_phy general;
	struct statistics_rx_ht_phy ofdm_ht;
} __packed;
2887
2888/**
2889 * struct statistics_tx_power - current tx power
2890 *
2891 * @ant_a: current tx power on chain a in 1/2 dB step
2892 * @ant_b: current tx power on chain b in 1/2 dB step
2893 * @ant_c: current tx power on chain c in 1/2 dB step
2894 */
/**
 * struct statistics_tx_power - current tx power
 *
 * @ant_a: current tx power on chain a in 1/2 dB step
 * @ant_b: current tx power on chain b in 1/2 dB step
 * @ant_c: current tx power on chain c in 1/2 dB step
 */
struct statistics_tx_power {
	u8 ant_a;
	u8 ant_b;
	u8 ant_c;
	u8 reserved;	/* pad to 4-byte alignment in the wire format */
} __packed;
2901
/* Tx aggregation (block-ack / scheduler) statistics from uCode. */
struct statistics_tx_non_phy_agg {
	__le32 ba_timeout;		/* block-ack response timeouts */
	__le32 ba_reschedule_frames;	/* frames rescheduled after BA */
	__le32 scd_query_agg_frame_cnt;	/* scheduler query results below --
					 * NOTE(review): exact SCD query
					 * semantics not visible here */
	__le32 scd_query_no_agg;
	__le32 scd_query_agg;
	__le32 scd_query_mismatch;
	__le32 frame_not_ready;
	__le32 underrun;
	__le32 bt_prio_kill;		/* Tx killed by BT-coex priority */
	__le32 rx_ba_rsp_cnt;		/* block-ack responses received */
} __packed;
2914
/* Tx statistics reported by uCode; all counters little-endian, fixed ABI. */
struct statistics_tx {
	__le32 preamble_cnt;
	__le32 rx_detected_cnt;
	__le32 bt_prio_defer_cnt;	/* Tx deferred due to BT priority */
	__le32 bt_prio_kill_cnt;	/* Tx killed due to BT priority */
	__le32 few_bytes_cnt;
	__le32 cts_timeout;
	__le32 ack_timeout;
	__le32 expected_ack_cnt;
	__le32 actual_ack_cnt;
	__le32 dump_msdu_cnt;
	__le32 burst_abort_next_frame_mismatch_cnt;
	__le32 burst_abort_missing_next_frame_cnt;
	__le32 cts_timeout_collision;
	__le32 ack_or_ba_timeout_collision;
	struct statistics_tx_non_phy_agg agg;	/* aggregation sub-block */

	__le32 reserved1;
} __packed;
2934
2935
/* Antenna diversity statistics (time spent transmitting on each chain). */
struct statistics_div {
	__le32 tx_on_a;		/* Tx time on antenna A */
	__le32 tx_on_b;		/* Tx time on antenna B */
	__le32 exec_time;
	__le32 probe_time;
	__le32 reserved1;
	__le32 reserved2;
} __packed;
2944
/* General device statistics shared by all supported devices. */
struct statistics_general_common {
	__le32 temperature;	/* radio temperature */
	struct statistics_dbg dbg;	/* debug counters */
	__le32 sleep_time;	/* time spent in power-save sleep */
	__le32 slots_out;
	__le32 slots_idle;
	__le32 ttl_timestamp;
	struct statistics_div div;	/* antenna diversity sub-block */
	__le32 rx_enable_counter;
	/*
	 * num_of_sos_states:
	 *  count the number of times we have to re-tune
	 *  in order to get out of bad PHY status
	 */
	__le32 num_of_sos_states;
} __packed;
2961
/* Wrapper adding reserved tail words to the common general statistics. */
struct statistics_general {
	struct statistics_general_common common;
	__le32 reserved2;
	__le32 reserved3;
} __packed;
2967
2968#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
2969#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
2970#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
2971
2972/*
2973 * REPLY_STATISTICS_CMD = 0x9c,
2974 * all devices identical.
2975 *
2976 * This command triggers an immediate response containing uCode statistics.
2977 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
2978 *
2979 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2980 * internal copy of the statistics (counters) after issuing the response.
2981 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
2982 *
2983 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2984 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
2985 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
2986 */
2987#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2988#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
/*
 * REPLY_STATISTICS_CMD (0x9c) payload: request an immediate statistics
 * report, optionally clearing counters and/or disabling per-beacon
 * notifications (see the IWL_STATS_CONF_* flags above).
 */
struct iwl_statistics_cmd {
	__le32 configuration_flags;	/* IWL_STATS_CONF_* */
} __packed;
2992
2993/*
2994 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
2995 *
2996 * By default, uCode issues this notification after receiving a beacon
2997 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2998 * REPLY_STATISTICS_CMD 0x9c, above.
2999 *
3000 * Statistics counters continue to increment beacon after beacon, but are
3001 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
3002 * 0x9c with CLEAR_STATS bit set (see above).
3003 *
3004 * uCode also issues this notification during scans. uCode clears statistics
3005 * appropriately so that each notification contains statistics for only the
3006 * one channel that has just been scanned.
3007 */
3008#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3009#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3010
/* STATISTICS_NOTIFICATION (0x9d) payload for 3945 devices. */
struct iwl3945_notif_statistics {
	__le32 flag;	/* STATISTICS_REPLY_FLG_* bits */
	struct iwl39_statistics_rx rx;
	struct iwl39_statistics_tx tx;
	struct iwl39_statistics_general general;
} __packed;
3017
/* STATISTICS_NOTIFICATION (0x9d) payload for 4965-class devices. */
struct iwl_notif_statistics {
	__le32 flag;	/* STATISTICS_REPLY_FLG_* bits */
	struct statistics_rx rx;
	struct statistics_tx tx;
	struct statistics_general general;
} __packed;
3024
3025/*
3026 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
3027 *
 * uCode sends a MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
 * missed beacons, regardless of how many were missed.  When the driver
 * receives the notification, it can find all of the beacon information
 * inside the command: the total number of missed beacons, the number of
 * consecutive missed beacons, the number of beacons received, and the number
 * of beacons expected to be received.
3034 *
3035 * If uCode detected consecutive_missed_beacons > 5, it will reset the radio
3036 * in order to bring the radio/PHY back to working state; which has no relation
3037 * to when driver will perform sensitivity calibration.
3038 *
 * The driver should set its own missed_beacon_threshold to decide when to
 * perform sensitivity calibration, based on the number of consecutive missed
 * beacons, in order to improve overall performance, especially in noisy
 * environments.
3042 *
3043 */
3044
3045#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
3046#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
3047#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
3048
/* MISSED_BEACONS_NOTIFICATION (0xa2) payload; see the comment above. */
struct iwl_missed_beacon_notif {
	__le32 consecutive_missed_beacons;
	/* "becons" spelling is a long-standing typo, but the field name is
	 * referenced by other files and the layout is firmware ABI, so it
	 * must not be renamed here. */
	__le32 total_missed_becons;
	__le32 num_expected_beacons;
	__le32 num_recvd_beacons;
} __packed;
3055
3056
3057/******************************************************************************
3058 * (11)
3059 * Rx Calibration Commands:
3060 *
3061 * With the uCode used for open source drivers, most Tx calibration (except
3062 * for Tx Power) and most Rx calibration is done by uCode during the
3063 * "initialize" phase of uCode boot. Driver must calibrate only:
3064 *
3065 * 1) Tx power (depends on temperature), described elsewhere
3066 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
3067 * 3) Receiver sensitivity (to optimize signal detection)
3068 *
3069 *****************************************************************************/
3070
3071/**
3072 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
3073 *
3074 * This command sets up the Rx signal detector for a sensitivity level that
3075 * is high enough to lock onto all signals within the associated network,
3076 * but low enough to ignore signals that are below a certain threshold, so as
3077 * not to have too many "false alarms". False alarms are signals that the
3078 * Rx DSP tries to lock onto, but then discards after determining that they
3079 * are noise.
3080 *
3081 * The optimum number of false alarms is between 5 and 50 per 200 TUs
3082 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
3083 * time listening, not transmitting). Driver must adjust sensitivity so that
3084 * the ratio of actual false alarms to actual Rx time falls within this range.
3085 *
3086 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
3087 * received beacon. These provide information to the driver to analyze the
3088 * sensitivity. Don't analyze statistics that come in from scanning, or any
3089 * other non-associated-network source. Pertinent statistics include:
3090 *
3091 * From "general" statistics (struct statistics_rx_non_phy):
3092 *
3093 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3094 * Measure of energy of desired signal. Used for establishing a level
3095 * below which the device does not detect signals.
3096 *
3097 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
3098 * Measure of background noise in silent period after beacon.
3099 *
3100 * channel_load
3101 * uSecs of actual Rx time during beacon period (varies according to
3102 * how much time was spent transmitting).
3103 *
3104 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
3105 *
3106 * false_alarm_cnt
3107 * Signal locks abandoned early (before phy-level header).
3108 *
3109 * plcp_err
3110 * Signal locks abandoned late (during phy-level header).
3111 *
3112 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
3113 * beacon to beacon, i.e. each value is an accumulation of all errors
3114 * before and including the latest beacon. Values will wrap around to 0
3115 * after counting up to 2^32 - 1. Driver must differentiate vs.
3116 * previous beacon's values to determine # false alarms in the current
3117 * beacon period.
3118 *
3119 * Total number of false alarms = false_alarms + plcp_errs
3120 *
3121 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
3122 * (notice that the start points for OFDM are at or close to settings for
3123 * maximum sensitivity):
3124 *
3125 * START / MIN / MAX
3126 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
3127 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
3128 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
3129 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
3130 *
3131 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3132 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
3133 * by *adding* 1 to all 4 of the table entries above, up to the max for
3134 * each entry. Conversely, if false alarm rate is too low (less than 5
3135 * for each 204.8 msecs listening), *subtract* 1 from each entry to
3136 * increase sensitivity.
3137 *
3138 * For CCK sensitivity, keep track of the following:
3139 *
3140 * 1). 20-beacon history of maximum background noise, indicated by
3141 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
3142 * 3 receivers. For any given beacon, the "silence reference" is
3143 * the maximum of last 60 samples (20 beacons * 3 receivers).
3144 *
3145 * 2). 10-beacon history of strongest signal level, as indicated
3146 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
3147 * i.e. the strength of the signal through the best receiver at the
3148 * moment. These measurements are "upside down", with lower values
3149 * for stronger signals, so max energy will be *minimum* value.
3150 *
3151 * Then for any given beacon, the driver must determine the *weakest*
3152 * of the strongest signals; this is the minimum level that needs to be
3153 * successfully detected, when using the best receiver at the moment.
3154 * "Max cck energy" is the maximum (higher value means lower energy!)
3155 * of the last 10 minima. Once this is determined, driver must add
3156 * a little margin by adding "6" to it.
3157 *
3158 * 3). Number of consecutive beacon periods with too few false alarms.
3159 * Reset this to 0 at the first beacon period that falls within the
3160 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3161 *
3162 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
3163 * (notice that the start points for CCK are at maximum sensitivity):
3164 *
3165 * START / MIN / MAX
3166 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
3167 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
3168 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
3169 *
3170 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3171 * (greater than 50 for each 204.8 msecs listening), method for reducing
3172 * sensitivity is:
3173 *
3174 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3175 * up to max 400.
3176 *
3177 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
3178 * sensitivity has been reduced a significant amount; bring it up to
3179 * a moderate 161. Otherwise, *add* 3, up to max 200.
3180 *
3181 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
3182 * sensitivity has been reduced only a moderate or small amount;
3183 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
3184 * down to min 0. Otherwise (if gain has been significantly reduced),
3185 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
3186 *
3187 * b) Save a snapshot of the "silence reference".
3188 *
3189 * If actual rate of CCK false alarms (+ plcp_errors) is too low
3190 * (less than 5 for each 204.8 msecs listening), method for increasing
3191 * sensitivity is used only if:
3192 *
3193 * 1a) Previous beacon did not have too many false alarms
3194 * 1b) AND difference between previous "silence reference" and current
3195 * "silence reference" (prev - current) is 2 or more,
3196 * OR 2) 100 or more consecutive beacon periods have had rate of
3197 * less than 5 false alarms per 204.8 milliseconds rx time.
3198 *
3199 * Method for increasing sensitivity:
3200 *
3201 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
3202 * down to min 125.
3203 *
3204 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3205 * down to min 200.
3206 *
3207 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
3208 *
3209 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3210 * (between 5 and 50 for each 204.8 msecs listening):
3211 *
3212 * 1) Save a snapshot of the silence reference.
3213 *
3214 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3215 * give some extra margin to energy threshold by *subtracting* 8
3216 * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
3217 *
3218 * For all cases (too few, too many, good range), make sure that the CCK
3219 * detection threshold (energy) is below the energy level for robust
3220 * detection over the past 10 beacon periods, the "Max cck energy".
3221 * Lower values mean higher energy; this means making sure that the value
3222 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3223 *
3224 */
3225
3226/*
3227 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
3228 */
3229#define HD_TABLE_SIZE (11) /* number of entries */
3230#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
3231#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
3232#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
3233#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
3234#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
3235#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
3236#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
3237#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
3238#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
3239#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3240#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3241
3242/* Control field in struct iwl_sensitivity_cmd */
3243#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
3244#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
3245
3246/**
3247 * struct iwl_sensitivity_cmd
3248 * @control: (1) updates working table, (0) updates default table
3249 * @table: energy threshold values, use HD_* as index into table
3250 *
3251 * Always use "1" in "control" to update uCode's working table and DSP.
3252 */
/**
 * struct iwl_sensitivity_cmd
 * @control: (1) updates working table, (0) updates default table
 * @table: energy threshold values, use HD_* as index into table
 *
 * Always use "1" in "control" to update uCode's working table and DSP.
 */
struct iwl_sensitivity_cmd {
	__le16 control;			/* always use "1" */
	__le16 table[HD_TABLE_SIZE];	/* use HD_* as index */
} __packed;
3257
3258
3259/**
3260 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3261 *
3262 * This command sets the relative gains of 4965 device's 3 radio receiver chains.
3263 *
3264 * After the first association, driver should accumulate signal and noise
3265 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
3266 * beacons from the associated network (don't collect statistics that come
3267 * in from scanning, or any other non-network source).
3268 *
3269 * DISCONNECTED ANTENNA:
3270 *
3271 * Driver should determine which antennas are actually connected, by comparing
3272 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3273 * following values over 20 beacons, one accumulator for each of the chains
3274 * a/b/c, from struct statistics_rx_non_phy:
3275 *
3276 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3277 *
3278 * Find the strongest signal from among a/b/c. Compare the other two to the
3279 * strongest. If any signal is more than 15 dB (times 20, unless you
3280 * divide the accumulated values by 20) below the strongest, the driver
3281 * considers that antenna to be disconnected, and should not try to use that
3282 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3283 * driver should declare the stronger one as connected, and attempt to use it
3284 * (A and B are the only 2 Tx chains!).
3285 *
3286 *
3287 * RX BALANCE:
3288 *
3289 * Driver should balance the 3 receivers (but just the ones that are connected
3290 * to antennas, see above) for gain, by comparing the average signal levels
3291 * detected during the silence after each beacon (background noise).
3292 * Accumulate (add) the following values over 20 beacons, one accumulator for
3293 * each of the chains a/b/c, from struct statistics_rx_non_phy:
3294 *
3295 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3296 *
3297 * Find the weakest background noise level from among a/b/c. This Rx chain
3298 * will be the reference, with 0 gain adjustment. Attenuate other channels by
3299 * finding noise difference:
3300 *
3301 * (accum_noise[i] - accum_noise[reference]) / 30
3302 *
3303 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3304 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
3305 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3306 * and set bit 2 to indicate "reduce gain". The value for the reference
3307 * (weakest) chain should be "0".
3308 *
3309 * diff_gain_[abc] bit fields:
3310 * 2: (1) reduce gain, (0) increase gain
3311 * 1-0: amount of gain, units of 1.5 dB
3312 */
3313
3314/* Phy calibration command for series */
3315/* The default calibrate table size if not specified by firmware */
3316#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
/* PHY calibration op codes / limits for the standard calibrate table. */
enum {
	IWL_PHY_CALIBRATE_DIFF_GAIN_CMD		= 7,	/* chain gain adjust */
	IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE	= 19,
};
3321
3322#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3323
/* Common header prepended to every PHY calibration command/result. */
struct iwl_calib_hdr {
	u8 op_code;	/* IWL_PHY_CALIBRATE_* */
	u8 first_group;
	u8 groups_num;
	u8 data_valid;
} __packed;
3330
3331/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
struct iwl_calib_diff_gain_cmd {
	struct iwl_calib_hdr hdr;
	/* Per-chain gain adjustments; bit 2 = reduce gain, bits 1-0 = amount
	 * in 1.5 dB units (see the RX BALANCE discussion above). */
	s8 diff_gain_a;		/* see above */
	s8 diff_gain_b;
	s8 diff_gain_c;
	u8 reserved1;
} __packed;
3339
3340/******************************************************************************
3341 * (12)
3342 * Miscellaneous Commands:
3343 *
3344 *****************************************************************************/
3345
3346/*
3347 * LEDs Command & Response
3348 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
3349 *
3350 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3351 * this command turns it on or off, or sets up a periodic blinking cycle.
3352 */
/*
 * REPLY_LEDS_CMD (0x48) payload: select an LED and either force it
 * on/off or program a periodic blink cycle of "on"/"off" intervals.
 */
struct iwl_led_cmd {
	__le32 interval;	/* "interval" in uSec */
	u8 id;			/* 1: Activity, 2: Link, 3: Tech */
	u8 off;			/* # intervals off while blinking;
				 * "0", with >0 "on" value, turns LED on */
	u8 on;			/* # intervals on while blinking;
				 * "0", regardless of "off", turns LED off */
	u8 reserved;
} __packed;
3362
3363
3364/******************************************************************************
3365 * (13)
3366 * Union of all expected notifications/responses:
3367 *
3368 *****************************************************************************/
3369
/*
 * Generic container for every frame/notification/response delivered by
 * the device over the Rx path.  The union covers all payloads the driver
 * knows how to parse; raw[] gives byte access for anything else.
 */
struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;	/* command id / sequence number */
	union {
		/* 3945-specific payloads */
		struct iwl3945_rx_frame rx_frame;
		struct iwl3945_tx_resp tx_resp;
		struct iwl3945_beacon_notif beacon_status;

		/* payloads common to 3945 and 4965 */
		struct iwl_alive_resp alive_frame;
		struct iwl_spectrum_notification spectrum_notif;
		struct iwl_csa_notification csa_notif;
		struct iwl_error_resp err_resp;
		struct iwl_card_state_notif card_state_notif;
		struct iwl_add_sta_resp add_sta;
		struct iwl_rem_sta_resp rem_sta;
		struct iwl_sleep_notification sleep_notif;
		struct iwl_spectrum_resp spectrum;
		struct iwl_notif_statistics stats;
		struct iwl_compressed_ba_resp compressed_ba;
		struct iwl_missed_beacon_notif missed_beacon;
		__le32 status;
		u8 raw[0];	/* variable-length raw payload access */
	} u;
} __packed;
3404
3405#endif /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644
index 000000000000..d418b647be80
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -0,0 +1,2674 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44
45
46MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/*
52 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the
54 * priority line in the PCIx).
55 * set bt_coex_active to false, uCode will ignore the BT activity and
56 * perform the normal operation
57 *
58 * User might experience transmit issue on some platform due to WiFi/BT
59 * co-exist problem. The possible behaviors are:
60 * Able to scan and finding all the available AP
61 * Not able to associate with any AP
62 * On those platforms, WiFi communication can be restored by set
63 * "bt_coex_active" module parameter to "false"
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
/* BT coexistence on/off; read-only module parameter (see comment above). */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Runtime debug bitmask shared with the 3945/4965 sub-drivers. */
u32 iwlegacy_debug_level;
EXPORT_SYMBOL(iwlegacy_debug_level);

/* Broadcast MAC address, shared so sub-drivers don't duplicate it. */
const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwlegacy_bcast_addr);
76
77
78/* This function both allocates and initializes hw and priv. */
79struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
80{
81 struct iwl_priv *priv;
82 /* mac80211 allocates memory for this device instance, including
83 * space for this driver's private structure */
84 struct ieee80211_hw *hw;
85
86 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
87 cfg->ops->ieee80211_ops);
88 if (hw == NULL) {
89 pr_err("%s: Can not allocate network device\n",
90 cfg->name);
91 goto out;
92 }
93
94 priv = hw->priv;
95 priv->hw = hw;
96
97out:
98 return hw;
99}
100EXPORT_SYMBOL(iwl_legacy_alloc_all);
101
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
/*
 * Fill in mac80211's HT capability info for one band from the device's
 * hardware parameters (chain counts, HT40 support, A-MSDU size).
 */
static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
			      struct ieee80211_sta_ht_cap *ht_info,
			      enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	/* Short GI at 20 MHz is always supported; HT40 caps only if the
	 * hardware advertises 40 MHz channels on this band. */
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* rx_mask[4] bit 0: MCS 32 (HT40 duplicate format) */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* One full MCS byte (MCS 0-7) per available Rx chain. */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		/* Advertise the Tx stream count only when it differs
		 * from Rx (per the HT MCS "Tx unequal" encoding). */
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
151
/**
 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom
 *
 * Allocates the ieee80211_channel and ieee80211_rate arrays, wires them
 * into priv->bands[] (2.4 GHz gets CCK+OFDM rates, 5 GHz OFDM only), and
 * translates each EEPROM channel's flags into mac80211 channel flags.
 * Returns 0 on success or -ENOMEM.  Idempotent: a second call is a no-op.
 */
int iwl_legacy_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;

	/* Already initialized (n_bitrates set) -> just re-assert status bit. */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	/* One flat array holds both bands; each band aliases a slice of it. */
	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
					IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
					IEEE80211_BAND_2GHZ);

	/* Remember the allocations so iwl_legacy_free_geos() can undo them. */
	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		if (!iwl_legacy_is_channel_valid(ch))
			continue;

		if (iwl_legacy_is_channel_a_band(ch))
			sband = &priv->bands[IEEE80211_BAND_5GHZ];
		else
			sband = &priv->bands[IEEE80211_BAND_2GHZ];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		/* NOTE(review): always true here -- invalid channels were
		 * skipped by the `continue` above, so the else branch below
		 * is effectively dead.  Kept as-is (behavior unchanged). */
		if (iwl_legacy_is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			/* Track the highest regulatory power as device limit. */
			if (ch->max_power_avg > priv->tx_power_device_lmt)
				priv->tx_power_device_lmt = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				iwl_legacy_is_channel_a_band(ch) ?  "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				 geo_ch->flags);
	}

	/* SKU claims 5 GHz support but EEPROM gave us no A-band channels:
	 * drop the A SKU bit so the rest of the driver ignores that band. */
	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			priv->pci_dev->device,
			priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_init_geos);
270
271/*
272 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
273 */
274void iwl_legacy_free_geos(struct iwl_priv *priv)
275{
276 kfree(priv->ieee_channels);
277 kfree(priv->ieee_rates);
278 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
279}
280EXPORT_SYMBOL(iwl_legacy_free_geos);
281
282static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
283 enum ieee80211_band band,
284 u16 channel, u8 extension_chan_offset)
285{
286 const struct iwl_channel_info *ch_info;
287
288 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
289 if (!iwl_legacy_is_channel_valid(ch_info))
290 return false;
291
292 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
293 return !(ch_info->ht40_extension_channel &
294 IEEE80211_CHAN_NO_HT40PLUS);
295 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
296 return !(ch_info->ht40_extension_channel &
297 IEEE80211_CHAN_NO_HT40MINUS);
298
299 return false;
300}
301
302bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
303 struct iwl_rxon_context *ctx,
304 struct ieee80211_sta_ht_cap *ht_cap)
305{
306 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
307 return false;
308
309 /*
310 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
311 * the bit will not set if it is pure 40MHz case
312 */
313 if (ht_cap && !ht_cap->ht_supported)
314 return false;
315
316#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
317 if (priv->disable_ht40)
318 return false;
319#endif
320
321 return iwl_legacy_is_channel_extension(priv, priv->band,
322 le16_to_cpu(ctx->staging.channel),
323 ctx->ht.extension_chan_offset);
324}
325EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
326
327static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
328{
329 u16 new_val;
330 u16 beacon_factor;
331
332 /*
333 * If mac80211 hasn't given us a beacon interval, program
334 * the default into the device.
335 */
336 if (!beacon_val)
337 return DEFAULT_BEACON_INTERVAL;
338
339 /*
340 * If the beacon interval we obtained from the peer
341 * is too large, we'll have to wake up more often
342 * (and in IBSS case, we'll beacon too much)
343 *
344 * For example, if max_beacon_val is 4096, and the
345 * requested beacon interval is 7000, we'll have to
346 * use 3500 to be able to wake up on the beacons.
347 *
348 * This could badly influence beacon detection stats.
349 */
350
351 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
352 new_val = beacon_val / beacon_factor;
353
354 if (!new_val)
355 new_val = max_beacon_val;
356
357 return new_val;
358}
359
/*
 * iwl_legacy_send_rxon_timing - program beacon timing into the uCode
 *
 * Fills ctx->timing from the current mac80211 configuration (listen
 * interval, beacon interval, DTIM period) and the last known timestamp,
 * then sends the context's RXON timing command synchronously.
 *
 * Returns the host-command result.  Caller must hold priv->mutex.
 */
int
iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	/* shrink the interval so it fits the device maximum */
	beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* time remaining until the next beacon boundary */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* a DTIM period of 0 is invalid -- default to 1 */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				sizeof(ctx->timing), &ctx->timing);
}
EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
407
408void
409iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
410 struct iwl_rxon_context *ctx,
411 int hw_decrypt)
412{
413 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
414
415 if (hw_decrypt)
416 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
417 else
418 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
419
420}
421EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
422
423/* validate RXON structure is valid */
424int
425iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
426{
427 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
428 bool error = false;
429
430 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
431 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
432 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
433 error = true;
434 }
435 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
436 IWL_WARN(priv, "check 2.4G: wrong radar\n");
437 error = true;
438 }
439 } else {
440 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
441 IWL_WARN(priv, "check 5.2G: not short slot!\n");
442 error = true;
443 }
444 if (rxon->flags & RXON_FLG_CCK_MSK) {
445 IWL_WARN(priv, "check 5.2G: CCK!\n");
446 error = true;
447 }
448 }
449 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
450 IWL_WARN(priv, "mac/bssid mcast!\n");
451 error = true;
452 }
453
454 /* make sure basic rates 6Mbps and 1Mbps are supported */
455 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
456 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
457 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
458 error = true;
459 }
460
461 if (le16_to_cpu(rxon->assoc_id) > 2007) {
462 IWL_WARN(priv, "aid > 2007\n");
463 error = true;
464 }
465
466 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
467 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
468 IWL_WARN(priv, "CCK and short slot\n");
469 error = true;
470 }
471
472 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
473 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
474 IWL_WARN(priv, "CCK and auto detect");
475 error = true;
476 }
477
478 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
479 RXON_FLG_TGG_PROTECT_MSK)) ==
480 RXON_FLG_TGG_PROTECT_MSK) {
481 IWL_WARN(priv, "TGg but no auto-detect\n");
482 error = true;
483 }
484
485 if (error)
486 IWL_WARN(priv, "Tuning to channel %d\n",
487 le16_to_cpu(rxon->channel));
488
489 if (error) {
490 IWL_ERR(priv, "Invalid RXON\n");
491 return -EINVAL;
492 }
493 return 0;
494}
495EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
496
/**
 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx)
{
	const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
	const struct iwl_legacy_rxon_cmd *active = &ctx->active;

/* Condition true: full RXON needed; log which check fired and bail out. */
#define CHK(cond) \
	if ((cond)) { \
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
		return 1; \
	}

/* Staging/active field mismatch: full RXON needed; log both values. */
#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		IWL_DEBUG_INFO(priv, "need full RXON - " \
			       #c1 " != " #c2 " - %d != %d\n", \
			       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_legacy_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
				active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
558
559u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
560 struct iwl_rxon_context *ctx)
561{
562 /*
563 * Assign the lowest rate -- should really get this from
564 * the beacon skb from mac80211.
565 */
566 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
567 return IWL_RATE_1M_PLCP;
568 else
569 return IWL_RATE_6M_PLCP;
570}
571EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
572
/*
 * _iwl_legacy_set_rxon_ht - translate a context's HT state into RXON flags
 *
 * Programs channel mode (legacy/mixed/pure-40), control-channel location
 * and HT protection bits in ctx->staging based on ctx->ht, then updates
 * the RX chain if the hardware supports it.  NOTE(review): @ht_conf is
 * not referenced in this body; all inputs come from @ctx -- confirm.
 */
static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
			struct iwl_ht_config *ht_conf,
			struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		/* HT disabled: clear all HT40/protection-related flags */
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
					RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection ==
				IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
644
/* Apply the HT configuration to the staging RXON of every context. */
void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
653
654/* Return valid, unused, channel for a passive scan to reset the RF */
655u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
656 enum ieee80211_band band)
657{
658 const struct iwl_channel_info *ch_info;
659 int i;
660 u8 channel = 0;
661 u8 min, max;
662 struct iwl_rxon_context *ctx;
663
664 if (band == IEEE80211_BAND_5GHZ) {
665 min = 14;
666 max = priv->channel_count;
667 } else {
668 min = 0;
669 max = 14;
670 }
671
672 for (i = min; i < max; i++) {
673 bool busy = false;
674
675 for_each_context(priv, ctx) {
676 busy = priv->channel_info[i].channel ==
677 le16_to_cpu(ctx->staging.channel);
678 if (busy)
679 break;
680 }
681
682 if (busy)
683 continue;
684
685 channel = priv->channel_info[i].channel;
686 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
687 if (iwl_legacy_is_channel_valid(ch_info))
688 break;
689 }
690
691 return channel;
692}
693EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
694
695/**
696 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
697 * @ch: requested channel as a pointer to struct ieee80211_channel
698
699 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
700 * in the staging RXON flag structure based on the ch->band
701 */
702int
703iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
704 struct iwl_rxon_context *ctx)
705{
706 enum ieee80211_band band = ch->band;
707 u16 channel = ch->hw_value;
708
709 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
710 (priv->band == band))
711 return 0;
712
713 ctx->staging.channel = cpu_to_le16(channel);
714 if (band == IEEE80211_BAND_5GHZ)
715 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
716 else
717 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
718
719 priv->band = band;
720
721 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
722
723 return 0;
724}
725EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
726
727void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
728 struct iwl_rxon_context *ctx,
729 enum ieee80211_band band,
730 struct ieee80211_vif *vif)
731{
732 if (band == IEEE80211_BAND_5GHZ) {
733 ctx->staging.flags &=
734 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
735 | RXON_FLG_CCK_MSK);
736 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
737 } else {
738 /* Copied from iwl_post_associate() */
739 if (vif && vif->bss_conf.use_short_slot)
740 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
741 else
742 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
743
744 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
745 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
746 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
747 }
748}
749EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
750
/*
 * initialize rxon structure with default values from eeprom
 *
 * Resets ctx->staging: device type and filter flags chosen by the vif
 * type (or the context's "unused" type when no vif is attached), channel
 * taken from the active RXON (falling back to the first known channel),
 * band flags and default basic/HT rates.
 */
void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info = iwl_legacy_get_channel_info(priv, priv->band,
				le16_to_cpu(ctx->active.channel));

	/* fall back to the first channel table entry if lookup failed */
	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
819
820void iwl_legacy_set_rate(struct iwl_priv *priv)
821{
822 const struct ieee80211_supported_band *hw = NULL;
823 struct ieee80211_rate *rate;
824 struct iwl_rxon_context *ctx;
825 int i;
826
827 hw = iwl_get_hw_mode(priv, priv->band);
828 if (!hw) {
829 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
830 return;
831 }
832
833 priv->active_rate = 0;
834
835 for (i = 0; i < hw->n_bitrates; i++) {
836 rate = &(hw->bitrates[i]);
837 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
838 priv->active_rate |= (1 << rate->hw_value);
839 }
840
841 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
842
843 for_each_context(priv, ctx) {
844 ctx->staging.cck_basic_rates =
845 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
846
847 ctx->staging.ofdm_basic_rates =
848 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
849 }
850}
851EXPORT_SYMBOL(iwl_legacy_set_rate);
852
/*
 * iwl_legacy_chswitch_done - finalize a pending channel switch
 *
 * Reports the result to mac80211 and clears the switch-in-progress flag
 * under priv->mutex.  No-op while the driver is shutting down.
 */
void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->switch_rxon.switch_in_progress) {
		/* notify mac80211 first, then clear the flag under the lock */
		ieee80211_chswitch_done(ctx->vif, is_success);
		mutex_lock(&priv->mutex);
		priv->switch_rxon.switch_in_progress = false;
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
868
/*
 * iwl_legacy_rx_csa - handle the uCode channel-switch-announcement reply
 *
 * On success (zero status and channel matching the pending request) the
 * active and staging RXON channels are updated before the switch is
 * completed toward mac80211; otherwise the switch is reported failed.
 */
void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	/* NOTE(review): (void *) cast suggests ctx->active is otherwise
	 * treated as read-only -- confirm against the struct definition */
	struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
				le16_to_cpu(csa->channel));
			iwl_legacy_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
				le16_to_cpu(csa->channel));
			iwl_legacy_chswitch_done(priv, false);
		}
	}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);
893
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * iwl_legacy_print_rx_config_cmd - dump the staging RXON for debugging
 *
 * Hex-dumps the whole staging RXON command followed by a field-by-field
 * breakdown.  Debug builds only.
 */
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
				le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
				le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
				rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
				le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
#endif
/**
 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
 *
 * Records the firmware error, aborts any in-flight host command, dumps
 * the error/event logs, wakes command waiters, and (if enabled) queues
 * a restart of the adapter.
 */
void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	IWL_ERR(priv, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	/* dump diagnostics before touching any more state */
	priv->cfg->ops->lib->dump_nic_error_log(priv);
	if (priv->cfg->ops->lib->dump_fh)
		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
	priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_legacy_print_rx_config_cmd(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
#endif

	wake_up_interruptible(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			  "Restarting adapter due to uCode error.\n");

		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
958
/*
 * iwl_legacy_apm_stop_master - halt the device's bus-master DMA
 *
 * Requests STOP_MASTER and polls (up to 100 us) for the MASTER_DISABLED
 * acknowledgement.  Returns the poll result; a timeout is only warned.
 */
static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(priv, "stop master\n");

	return ret;
}
975
/*
 * iwl_legacy_apm_stop - stop the device and drop it into low-power state
 *
 * Stops bus-master DMA, issues a software reset, and clears the
 * "initialization complete" bit so the adapter leaves the powered-up
 * active (D0A*) state.
 */
void iwl_legacy_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_legacy_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* brief settle time after asserting the reset */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_legacy_apm_stop);
996
997
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 *
 * Returns 0 on success, or the (negative) iwl_poll_bit() result when
 * the MAC clock fails to stabilize.
 */
int iwl_legacy_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
					CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is no-op for 3945 (non-existant bit)
	 */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	if (priv->cfg->base_params->set_l0s) {
		lctl = iwl_legacy_pcie_link_ctl(priv);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S  */
			iwl_legacy_set_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			iwl_legacy_clear_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->base_params->pll_cfg_val)
		iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
			    priv->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Boostrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (priv->cfg->base_params->use_bsm)
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_apm_init);
1110
1111
1112int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1113{
1114 int ret;
1115 s8 prev_tx_power;
1116 bool defer;
1117 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1118
1119 lockdep_assert_held(&priv->mutex);
1120
1121 if (priv->tx_power_user_lmt == tx_power && !force)
1122 return 0;
1123
1124 if (!priv->cfg->ops->lib->send_tx_power)
1125 return -EOPNOTSUPP;
1126
1127 if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
1128 IWL_WARN(priv,
1129 "Requested user TXPOWER %d below lower limit %d.\n",
1130 tx_power,
1131 IWL4965_TX_POWER_TARGET_POWER_MIN);
1132 return -EINVAL;
1133 }
1134
1135 if (tx_power > priv->tx_power_device_lmt) {
1136 IWL_WARN(priv,
1137 "Requested user TXPOWER %d above upper limit %d.\n",
1138 tx_power, priv->tx_power_device_lmt);
1139 return -EINVAL;
1140 }
1141
1142 if (!iwl_legacy_is_ready_rf(priv))
1143 return -EIO;
1144
1145 /* scan complete and commit_rxon use tx_power_next value,
1146 * it always need to be updated for newest request */
1147 priv->tx_power_next = tx_power;
1148
1149 /* do not set tx power when scanning or channel changing */
1150 defer = test_bit(STATUS_SCANNING, &priv->status) ||
1151 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
1152 if (defer && !force) {
1153 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
1154 return 0;
1155 }
1156
1157 prev_tx_power = priv->tx_power_user_lmt;
1158 priv->tx_power_user_lmt = tx_power;
1159
1160 ret = priv->cfg->ops->lib->send_tx_power(priv);
1161
1162 /* if fail to set tx_power, restore the orig. tx power */
1163 if (ret) {
1164 priv->tx_power_user_lmt = prev_tx_power;
1165 priv->tx_power_next = prev_tx_power;
1166 }
1167 return ret;
1168}
1169EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1170
1171void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1172{
1173 struct iwl_bt_cmd bt_cmd = {
1174 .lead_time = BT_LEAD_TIME_DEF,
1175 .max_kill = BT_MAX_KILL_DEF,
1176 .kill_ack_mask = 0,
1177 .kill_cts_mask = 0,
1178 };
1179
1180 if (!bt_coex_active)
1181 bt_cmd.flags = BT_COEX_DISABLE;
1182 else
1183 bt_cmd.flags = BT_COEX_ENABLE;
1184
1185 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1186 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1187
1188 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1189 sizeof(struct iwl_bt_cmd), &bt_cmd))
1190 IWL_ERR(priv, "failed to send BT Coex Config\n");
1191}
1192EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1193
1194int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1195{
1196 struct iwl_statistics_cmd statistics_cmd = {
1197 .configuration_flags =
1198 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1199 };
1200
1201 if (flags & CMD_ASYNC)
1202 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1203 sizeof(struct iwl_statistics_cmd),
1204 &statistics_cmd, NULL);
1205 else
1206 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1207 sizeof(struct iwl_statistics_cmd),
1208 &statistics_cmd);
1209}
1210EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1211
/*
 * iwl_legacy_rx_pm_sleep_notif - handle a PM sleep notification from uCode
 *
 * Debug builds log the reported sleep mode and wakeup source; otherwise
 * this is a no-op.
 */
void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1223
1224void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1225 struct iwl_rx_mem_buffer *rxb)
1226{
1227 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1228 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1229 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1230 "notification for %s:\n", len,
1231 iwl_legacy_get_cmd_string(pkt->hdr.cmd));
1232 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1233}
1234EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1235
/*
 * iwl_legacy_rx_reply_error - log an error reply received from the uCode
 *
 * Decodes and prints the error type, offending command, sequence number
 * and extra error info from the reply packet.
 */
void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1250
/* Reset the accumulated interrupt-source statistics counters. */
void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
1255
1256int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1257 const struct ieee80211_tx_queue_params *params)
1258{
1259 struct iwl_priv *priv = hw->priv;
1260 struct iwl_rxon_context *ctx;
1261 unsigned long flags;
1262 int q;
1263
1264 IWL_DEBUG_MAC80211(priv, "enter\n");
1265
1266 if (!iwl_legacy_is_ready_rf(priv)) {
1267 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1268 return -EIO;
1269 }
1270
1271 if (queue >= AC_NUM) {
1272 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1273 return 0;
1274 }
1275
1276 q = AC_NUM - 1 - queue;
1277
1278 spin_lock_irqsave(&priv->lock, flags);
1279
1280 for_each_context(priv, ctx) {
1281 ctx->qos_data.def_qos_parm.ac[q].cw_min =
1282 cpu_to_le16(params->cw_min);
1283 ctx->qos_data.def_qos_parm.ac[q].cw_max =
1284 cpu_to_le16(params->cw_max);
1285 ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1286 ctx->qos_data.def_qos_parm.ac[q].edca_txop =
1287 cpu_to_le16((params->txop * 32));
1288
1289 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1290 }
1291
1292 spin_unlock_irqrestore(&priv->lock, flags);
1293
1294 IWL_DEBUG_MAC80211(priv, "leave\n");
1295 return 0;
1296}
1297EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
1298
/*
 * iwl_legacy_mac_tx_last_beacon - mac80211 tx_last_beacon callback
 *
 * Returns non-zero when this device sent the last IBSS beacon, i.e.
 * it is currently acting as the IBSS manager.
 */
int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	return priv->ibss_manager == IWL_IBSS_MANAGER;
}
EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1306
/*
 * iwl_legacy_set_mode - rebuild and commit the RXON for a context
 *
 * Re-initializes the staging RXON from defaults for the context's vif
 * type, updates the RX chain (if supported), and commits to the device.
 */
static int
iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	iwl_legacy_connection_init_rx_config(priv, ctx);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	return iwl_legacy_commit_rxon(priv, ctx);
}
1317
1318static int iwl_legacy_setup_interface(struct iwl_priv *priv,
1319 struct iwl_rxon_context *ctx)
1320{
1321 struct ieee80211_vif *vif = ctx->vif;
1322 int err;
1323
1324 lockdep_assert_held(&priv->mutex);
1325
1326 /*
1327 * This variable will be correct only when there's just
1328 * a single context, but all code using it is for hardware
1329 * that supports only one context.
1330 */
1331 priv->iw_mode = vif->type;
1332
1333 ctx->is_active = true;
1334
1335 err = iwl_legacy_set_mode(priv, ctx);
1336 if (err) {
1337 if (!ctx->always_active)
1338 ctx->is_active = false;
1339 return err;
1340 }
1341
1342 return 0;
1343}
1344
/*
 * iwl_legacy_mac_add_interface - mac80211 add_interface callback
 *
 * Finds a free RXON context whose supported interface modes include the
 * vif's type (rejecting the add when a busy context is exclusive for its
 * current type), binds the vif to it, and brings the interface up.  On
 * setup failure the binding is undone and the error returned.
 */
int
iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   vif->type, vif->addr);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
						BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(vif->type)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	err = iwl_legacy_setup_interface(priv, ctx);
	if (!err)
		goto out;

	/* setup failed: unbind the vif again */
	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;
 out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1407
/*
 * Detach @vif from its RXON context.
 *
 * Cancels any scan started on behalf of @vif.  Unless the caller is
 * only switching interface type (@mode_change true), the context is
 * reprogrammed with its defaults and deactivated (except for
 * always-active contexts).
 *
 * Must be called with priv->mutex held.
 */
static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
					  struct ieee80211_vif *vif,
					  bool mode_change)
{
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&priv->mutex);

	/* If a scan is running for this vif, make sure it is fully ended. */
	if (priv->scan_vif == vif) {
		iwl_legacy_scan_cancel_timeout(priv, 200);
		iwl_legacy_force_scan_end(priv);
	}

	if (!mode_change) {
		iwl_legacy_set_mode(priv, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}
}
1427
/*
 * mac80211 remove_interface callback: unbind @vif from its context,
 * tear the context down and forget the current BSSID.
 */
void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	WARN_ON(ctx->vif != vif);
	/* Clear the binding before teardown reprograms the context. */
	ctx->vif = NULL;

	iwl_legacy_teardown_interface(priv, vif, false);

	memset(priv->bssid, 0, ETH_ALEN);
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");

}
EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1450
1451int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1452{
1453 if (!priv->txq)
1454 priv->txq = kzalloc(
1455 sizeof(struct iwl_tx_queue) *
1456 priv->cfg->base_params->num_of_queues,
1457 GFP_KERNEL);
1458 if (!priv->txq) {
1459 IWL_ERR(priv, "Not enough memory for txq\n");
1460 return -ENOMEM;
1461 }
1462 return 0;
1463}
1464EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1465
/*
 * Free the TX queue array allocated by iwl_legacy_alloc_txq_mem().
 * The pointer is cleared so a later re-allocation (or a stray second
 * call) is safe; kfree(NULL) is a no-op.
 */
void iwl_legacy_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	priv->txq = NULL;
}
EXPORT_SYMBOL(iwl_legacy_txq_mem);
1472
1473#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1474
1475#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1476
1477void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
1478{
1479 priv->tx_traffic_idx = 0;
1480 priv->rx_traffic_idx = 0;
1481 if (priv->tx_traffic)
1482 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1483 if (priv->rx_traffic)
1484 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1485}
1486
1487int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
1488{
1489 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1490
1491 if (iwlegacy_debug_level & IWL_DL_TX) {
1492 if (!priv->tx_traffic) {
1493 priv->tx_traffic =
1494 kzalloc(traffic_size, GFP_KERNEL);
1495 if (!priv->tx_traffic)
1496 return -ENOMEM;
1497 }
1498 }
1499 if (iwlegacy_debug_level & IWL_DL_RX) {
1500 if (!priv->rx_traffic) {
1501 priv->rx_traffic =
1502 kzalloc(traffic_size, GFP_KERNEL);
1503 if (!priv->rx_traffic)
1504 return -ENOMEM;
1505 }
1506 }
1507 iwl_legacy_reset_traffic_log(priv);
1508 return 0;
1509}
1510EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1511
1512void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
1513{
1514 kfree(priv->tx_traffic);
1515 priv->tx_traffic = NULL;
1516
1517 kfree(priv->rx_traffic);
1518 priv->rx_traffic = NULL;
1519}
1520EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1521
1522void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
1523 u16 length, struct ieee80211_hdr *header)
1524{
1525 __le16 fc;
1526 u16 len;
1527
1528 if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
1529 return;
1530
1531 if (!priv->tx_traffic)
1532 return;
1533
1534 fc = header->frame_control;
1535 if (ieee80211_is_data(fc)) {
1536 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1537 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1538 memcpy((priv->tx_traffic +
1539 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1540 header, len);
1541 priv->tx_traffic_idx =
1542 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1543 }
1544}
1545EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1546
1547void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
1548 u16 length, struct ieee80211_hdr *header)
1549{
1550 __le16 fc;
1551 u16 len;
1552
1553 if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
1554 return;
1555
1556 if (!priv->rx_traffic)
1557 return;
1558
1559 fc = header->frame_control;
1560 if (ieee80211_is_data(fc)) {
1561 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1562 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1563 memcpy((priv->rx_traffic +
1564 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1565 header, len);
1566 priv->rx_traffic_idx =
1567 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1568 }
1569}
1570EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
1571
/*
 * Map a MANAGEMENT_* statistics index to its name for debugfs output.
 * IWL_CMD expands to "case x: return #x;", so each line below is one
 * switch case.  Unknown values yield "UNKNOWN".
 */
const char *iwl_legacy_get_mgmt_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(MANAGEMENT_ASSOC_REQ);
	IWL_CMD(MANAGEMENT_ASSOC_RESP);
	IWL_CMD(MANAGEMENT_REASSOC_REQ);
	IWL_CMD(MANAGEMENT_REASSOC_RESP);
	IWL_CMD(MANAGEMENT_PROBE_REQ);
	IWL_CMD(MANAGEMENT_PROBE_RESP);
	IWL_CMD(MANAGEMENT_BEACON);
	IWL_CMD(MANAGEMENT_ATIM);
	IWL_CMD(MANAGEMENT_DISASSOC);
	IWL_CMD(MANAGEMENT_AUTH);
	IWL_CMD(MANAGEMENT_DEAUTH);
	IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
1592
/*
 * Map a CONTROL_* statistics index to its name for debugfs output.
 * IWL_CMD expands to "case x: return #x;".  Unknown values yield
 * "UNKNOWN".
 */
const char *iwl_legacy_get_ctrl_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CONTROL_BACK_REQ);
	IWL_CMD(CONTROL_BACK);
	IWL_CMD(CONTROL_PSPOLL);
	IWL_CMD(CONTROL_RTS);
	IWL_CMD(CONTROL_CTS);
	IWL_CMD(CONTROL_ACK);
	IWL_CMD(CONTROL_CFEND);
	IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
1609
1610void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1611{
1612 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1613 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1614}
1615
1616/*
1617 * if CONFIG_IWLWIFI_LEGACY_DEBUGFS defined,
1618 * iwl_legacy_update_stats function will
1619 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass
1620 * Use debugFs to display the rx/rx_statistics
1621 * if CONFIG_IWLWIFI_LEGACY_DEBUGFS not being defined, then no MGMT and CTRL
1622 * information will be recorded, but DATA pkt still will be recorded
1623 * for the reason of iwl_led.c need to control the led blinking based on
1624 * number of tx and rx data.
1625 *
1626 */
/*
 * Per-frame traffic accounting.
 *
 * Buckets the frame into the TX or RX stats block (selected by @is_tx)
 * according to its frame-control subtype: management and control frames
 * each bump a per-subtype counter, everything else is counted as data
 * (frame count plus @len bytes).  @fc is the little-endian 802.11
 * frame-control field, so the subtype comparisons stay in __le16.
 */
void
iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		/* Management frames: one counter per subtype. */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		/* Control frames: one counter per subtype. */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(iwl_legacy_update_stats);
1710#endif
1711
/*
 * Force the radio through a reset by kicking off an internal short
 * (single-channel) scan.  Skipped while the driver is shutting down or
 * when no context is associated.
 */
static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!iwl_legacy_is_any_associated(priv)) {
		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
		return;
	}
	/*
	 * There is no easy and better way to force reset the radio;
	 * the only known method is switching channel, which forces the
	 * radio to reset and retune.  Use an internal short scan
	 * (single channel) operation to achieve this.
	 * The driver should reset the radio on consecutive missed
	 * beacons, or on any other detected uCode error condition.
	 */
	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
	iwl_legacy_internal_short_hw_scan(priv);
}
1733
1734
/*
 * Perform a forced reset of the given kind.
 *
 * @mode:     IWL_RF_RESET (radio retune via internal scan) or
 *            IWL_FW_RESET (full firmware reload via the restart work).
 * @external: true when triggered from debugfs; external requests bypass
 *            the rate limiting and the fw_restart module parameter.
 *
 * Returns 0 on success, -EINVAL for an invalid mode or while exiting,
 * or -EAGAIN when an internal request arrives within reset_duration of
 * the previous one (rate limited).
 */
int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
{
	struct iwl_force_reset *force_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	if (mode >= IWL_MAX_FORCE_RESET) {
		IWL_DEBUG_INFO(priv, "invalid reset request.\n");
		return -EINVAL;
	}
	force_reset = &priv->force_reset[mode];
	force_reset->reset_request_count++;
	if (!external) {
		/* Rate-limit internally generated resets. */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
		    force_reset->reset_duration, jiffies)) {
			IWL_DEBUG_INFO(priv, "force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;
	IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
	switch (mode) {
	case IWL_RF_RESET:
		_iwl_legacy_force_rf_reset(priv);
		break;
	case IWL_FW_RESET:
		/*
		 * if the request is from external(ex: debugfs),
		 * then always perform the request in regardless the module
		 * parameter setting
		 * if the request is from internal (uCode error or driver
		 * detect failure), then fw_restart module parameter
		 * need to be check before performing firmware reload
		 */
		if (!external && !priv->cfg->mod_params->restart_fw) {
			IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
				       "module parameter setting\n");
			break;
		}
		IWL_ERR(priv, "On demand firmware reload\n");
		/* Set the FW error flag -- cleared on iwl_down */
		set_bit(STATUS_FW_ERROR, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
		/*
		 * Keep the restart process from trying to send host
		 * commands by clearing the INIT status bit
		 */
		clear_bit(STATUS_READY, &priv->status);
		queue_work(priv->workqueue, &priv->restart);
		break;
	}
	return 0;
}
1792
/*
 * mac80211 change_interface callback: switch @vif to @newtype without
 * removing/re-adding it.
 *
 * Refuses (-EBUSY) when the context does not support the new type, or
 * when the new type would be exclusive while another context is busy.
 * A device-programming failure after the internal switch is masked
 * (returns 0) so mac80211 stays in sync with our internal state.
 */
int
iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	struct iwl_rxon_context *tmp;
	u32 interface_modes;
	int err;

	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&priv->mutex);

	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;

	if (!(interface_modes & BIT(newtype))) {
		err = -EBUSY;
		goto out;
	}

	if (ctx->exclusive_interface_modes & BIT(newtype)) {
		for_each_context(priv, tmp) {
			if (ctx == tmp)
				continue;

			if (!tmp->vif)
				continue;

			/*
			 * The current mode switch would be exclusive, but
			 * another context is active ... refuse the switch.
			 */
			err = -EBUSY;
			goto out;
		}
	}

	/* success */
	iwl_legacy_teardown_interface(priv, vif, true);
	vif->type = newtype;
	err = iwl_legacy_setup_interface(priv, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

 out:
	mutex_unlock(&priv->mutex);
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1851
1852/*
1853 * On every watchdog tick we check (latest) time stamp. If it does not
1854 * change during timeout period and queue is not empty we reset firmware.
1855 */
1856static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1857{
1858 struct iwl_tx_queue *txq = &priv->txq[cnt];
1859 struct iwl_queue *q = &txq->q;
1860 unsigned long timeout;
1861 int ret;
1862
1863 if (q->read_ptr == q->write_ptr) {
1864 txq->time_stamp = jiffies;
1865 return 0;
1866 }
1867
1868 timeout = txq->time_stamp +
1869 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1870
1871 if (time_after(jiffies, timeout)) {
1872 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1873 q->id, priv->cfg->base_params->wd_timeout);
1874 ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
1875 return (ret == -EAGAIN) ? 0 : 1;
1876 }
1877
1878 return 0;
1879}
1880
1881/*
1882 * Making watchdog tick be a quarter of timeout assure we will
1883 * discover the queue hung between timeout and 1.25*timeout
1884 */
1885#define IWL_WD_TICK(timeout) ((timeout) / 4)
1886
1887/*
1888 * Watchdog timer callback, we check each tx queue for stuck, if if hung
1889 * we reset the firmware. If everything is fine just rearm the timer.
1890 */
1891void iwl_legacy_bg_watchdog(unsigned long data)
1892{
1893 struct iwl_priv *priv = (struct iwl_priv *)data;
1894 int cnt;
1895 unsigned long timeout;
1896
1897 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1898 return;
1899
1900 timeout = priv->cfg->base_params->wd_timeout;
1901 if (timeout == 0)
1902 return;
1903
1904 /* monitor and check for stuck cmd queue */
1905 if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
1906 return;
1907
1908 /* monitor and check for other stuck queues */
1909 if (iwl_legacy_is_any_associated(priv)) {
1910 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1911 /* skip as we already checked the command queue */
1912 if (cnt == priv->cmd_queue)
1913 continue;
1914 if (iwl_legacy_check_stuck_queue(priv, cnt))
1915 return;
1916 }
1917 }
1918
1919 mod_timer(&priv->watchdog, jiffies +
1920 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1921}
1922EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1923
1924void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1925{
1926 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1927
1928 if (timeout)
1929 mod_timer(&priv->watchdog,
1930 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1931 else
1932 del_timer(&priv->watchdog);
1933}
1934EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1935
1936/*
1937 * extended beacon time format
1938 * time in usec will be changed into a 32-bit value in extended:internal format
1939 * the extended part is the beacon counts
1940 * the internal part is the time in usec within one beacon interval
1941 */
/*
 * Convert @usec into the device's extended beacon time format:
 * the high bits (above beacon_time_tsf_bits) hold the whole-beacon
 * count, the low bits hold the remainder in usec within one beacon
 * interval.  Returns 0 when either input is 0.
 */
u32
iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
					u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	/* Beacon count, masked to the width of the "extended" field. */
	quot = (usec / interval) &
		(iwl_legacy_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
	/* Intra-interval remainder, masked to the "internal" field. */
	rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
				   priv->hw_params.beacon_time_tsf_bits);

	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
1963
1964/* base is usually what we get from ucode with each received frame,
1965 * the same as HW timer counter counting down
1966 */
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 */
__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	/* Split both operands into beacon-count (high) and usec (low)
	 * parts per the extended beacon time format, then add them with
	 * an explicit carry from the low part into the beacon count. */
	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		/* Low part underflowed (hw counter counts down): borrow
		 * one interval and bump the beacon count. */
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
1991
1992#ifdef CONFIG_PM
1993
/*
 * dev_pm_ops suspend/freeze/poweroff handler.
 *
 * mac80211 calls iwl_mac_stop() first on system suspend, but that path
 * does not know it is a suspend and therefore never invokes
 * apm_ops.stop().  Stop the APM here so DMA is quiescent before the
 * system sleeps.
 */
int iwl_legacy_pci_suspend(struct device *device)
{
	struct iwl_priv *priv = pci_get_drvdata(to_pci_dev(device));

	iwl_legacy_apm_stop(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_suspend);
2011
2012int iwl_legacy_pci_resume(struct device *device)
2013{
2014 struct pci_dev *pdev = to_pci_dev(device);
2015 struct iwl_priv *priv = pci_get_drvdata(pdev);
2016 bool hw_rfkill = false;
2017
2018 /*
2019 * We disable the RETRY_TIMEOUT register (0x41) to keep
2020 * PCI Tx retries from interfering with C3 CPU state.
2021 */
2022 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2023
2024 iwl_legacy_enable_interrupts(priv);
2025
2026 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2027 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2028 hw_rfkill = true;
2029
2030 if (hw_rfkill)
2031 set_bit(STATUS_RF_KILL_HW, &priv->status);
2032 else
2033 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2034
2035 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2036
2037 return 0;
2038}
2039EXPORT_SYMBOL(iwl_legacy_pci_resume);
2040
/*
 * Power-management ops shared by the legacy drivers: freeze/poweroff
 * behave like suspend, thaw/restore behave like resume.
 */
const struct dev_pm_ops iwl_legacy_pm_ops = {
	.suspend = iwl_legacy_pci_suspend,
	.resume = iwl_legacy_pci_resume,
	.freeze = iwl_legacy_pci_suspend,
	.thaw = iwl_legacy_pci_resume,
	.poweroff = iwl_legacy_pci_suspend,
	.restore = iwl_legacy_pci_resume,
};
EXPORT_SYMBOL(iwl_legacy_pm_ops);
2050
2051#endif /* CONFIG_PM */
2052
/*
 * Build the QoS flags for @ctx from its current qos/HT state and send
 * the QoS parameter command to the uCode (asynchronously).  Skipped
 * while exiting or when the context is inactive.
 */
static void
iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
		      ctx->qos_data.qos_active,
		      ctx->qos_data.def_qos_parm.qos_flags);

	/* Async send: completion is not waited for here. */
	iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
			       sizeof(struct iwl_qosparam_cmd),
			       &ctx->qos_data.def_qos_parm, NULL);
}
2079
2080/**
2081 * iwl_legacy_mac_config - mac80211 config callback
2082 */
int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct iwl_rxon_context *ctx;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	/* Per-context flag: HT enable state changed, QoS must be resent. */
	bool ht_changed[NUM_IWL_RXON_CTX] = {};

	if (WARN_ON(!priv->cfg->ops->legacy))
		return -EOPNOTSUPP;

	mutex_lock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
					channel->hw_value, changed);

	if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
			test_bit(STATUS_SCANNING, &priv->status))) {
		scan_active = 1;
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
			IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			for_each_context(priv, ctx)
				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	/* during scanning mac80211 will delay channel setting until
	 * scan finish with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		/* priv->lock protects the staging RXON while we edit it. */
		spin_lock_irqsave(&priv->lock, flags);

		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf)) {
				ctx->ht.enabled = conf_is_ht(conf);
				ht_changed[ctx->ctxid] = true;
			}
			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection =
					IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if ((le16_to_cpu(ctx->staging.channel) != ch))
				ctx->staging.flags = 0;

			iwl_legacy_set_rxon_channel(priv, channel, ctx);
			iwl_legacy_set_rxon_ht(priv, ht_conf);

			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
						      ctx->vif);
		}

		spin_unlock_irqrestore(&priv->lock, flags);

		if (priv->cfg->ops->legacy->update_bcast_stations)
			ret =
			priv->cfg->ops->legacy->update_bcast_stations(priv);

 set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		iwl_legacy_set_rate(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
			IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_legacy_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
			priv->tx_power_user_lmt, conf->power_level);

		iwl_legacy_set_tx_power(priv, conf->power_level, false);
	}

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	/* Commit any staged RXON changes and resend QoS where HT flipped. */
	for_each_context(priv, ctx) {
		if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
			iwl_legacy_commit_rxon(priv, ctx);
		else
			IWL_DEBUG_INFO(priv,
				"Not re-sending same RXON configuration.\n");
		if (ht_changed[ctx->ctxid])
			iwl_legacy_update_qos(priv, ctx);
	}

out:
	IWL_DEBUG_MAC80211(priv, "leave\n");
	mutex_unlock(&priv->mutex);
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_config);
2239
/*
 * mac80211 reset_tsf callback: drop all association state (HT config,
 * IBSS beacon skb, timestamp), cancel scanning and restart the
 * association process by committing an RXON without the ASSOC filter.
 */
void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	/* IBSS can only be the IWL_RXON_CTX_BSS context */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	spin_lock_irqsave(&priv->lock, flags);
	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
	spin_unlock_irqrestore(&priv->lock, flags);

	spin_lock_irqsave(&priv->lock, flags);

	/* new association get rid of ibss beacon skb */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = NULL;

	priv->timestamp = 0;

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_legacy_scan_cancel_timeout(priv, 100);
	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_legacy_commit_rxon(priv, ctx);

	iwl_legacy_set_rate(priv);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2289
/*
 * Refresh the context's HT state from the vif's BSS configuration:
 * protection mode, non-greenfield STA presence, and whether a single
 * RX chain suffices (derived from the peer's advertised MCS streams
 * for stations, always true for ad-hoc).
 */
static void iwl_legacy_ht_conf(struct iwl_priv *priv,
			struct ieee80211_vif *vif)
{
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_ASSOC(priv, "enter:\n");

	if (!ctx->ht.enabled)
		return;

	ctx->ht.protection =
		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ctx->ht.non_gf_sta_present =
		!!(bss_conf->ht_operation_mode &
				IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		/* RCU protects the station lookup. */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			maxstreams = (ht_cap->mcs.tx_params &
			      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
				>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			/* Peer only receives single-stream MCS rates. */
			if ((ht_cap->mcs.rx_mask[1] == 0) &&
			    (ht_cap->mcs.rx_mask[2] == 0))
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	IWL_DEBUG_ASSOC(priv, "leave\n");
}
2349
2350static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
2351 struct ieee80211_vif *vif)
2352{
2353 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2354
2355 /*
2356 * inform the ucode that there is no longer an
2357 * association and that no more packets should be
2358 * sent
2359 */
2360 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2361 ctx->staging.assoc_id = 0;
2362 iwl_legacy_commit_rxon(priv, ctx);
2363}
2364
/*
 * Fetch a fresh beacon from mac80211 for @vif, install it as the
 * current IBSS beacon (replacing and freeing any previous one), record
 * its timestamp, and run the post-associate hook to push it out.
 *
 * Must be called with priv->mutex held; priv->lock guards the beacon
 * skb swap.  The skb's ownership passes to priv->beacon_skb.
 */
static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "update beacon but no beacon context!\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Drop the previous beacon before installing the new one. */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	priv->timestamp = le64_to_cpu(timestamp);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return;
	}

	priv->cfg->ops->legacy->post_associate(priv);
}
2406
/*
 * iwl_legacy_mac_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Applies BSS configuration changes (QoS, beacon enable, BSSID, ERP
 * preamble/CTS protection, HT, association state, IBSS membership) to
 * the context's staging RXON and, where required, pushes the result to
 * the device via the device-specific legacy ops.
 *
 * Runs under priv->mutex; the QoS update additionally takes priv->lock
 * with IRQs disabled because qos_data is shared with the IRQ path.
 */
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	int ret;

	/* legacy ops are mandatory for this driver family */
	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	/* nothing can be applied until the uCode has reported alive */
	if (!iwl_legacy_is_alive(priv))
		return;

	mutex_lock(&priv->mutex);

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		iwl_legacy_update_qos(priv, ctx);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			priv->beacon_ctx = ctx;
		else
			priv->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv,
				"Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv,
				"leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
				bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			/* not associated: stop filtering on the old BSSID */
			ctx->staging.filter_flags &=
						~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
		iwl_legacy_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv,
			"ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG (11g) protection is only meaningful off the 5 GHz band */
		if (bss_conf->use_cts_prot &&
			(priv->band != IEEE80211_BAND_5GHZ))
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_legacy_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			ctx->staging.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		*/
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_legacy_ht_conf(priv, vif);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;

			if (!iwl_legacy_is_rfkill(priv))
				priv->cfg->ops->legacy->post_associate(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	/* while associated, push any staged change via RXON_ASSOC */
	if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
				&ctx->staging,
				sizeof(struct iwl_legacy_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
				bss_conf->bssid, ETH_ALEN);
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
			priv->cfg->ops->legacy->config_ap(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
							bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2580
/*
 * iwl_legacy_isr - shared hard-IRQ handler
 *
 * Masks device interrupts, reads the pending interrupt status registers,
 * and defers the actual servicing to priv->irq_tasklet (which re-enables
 * interrupts when it is done).  Returns IRQ_NONE for spurious interrupts
 * on a shared line so the IRQ core can try the other handlers.
 */
irqreturn_t iwl_legacy_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* all-ones (or the 0xa5a5a5ax debug pattern) means the reads hit a
	 * device that is no longer responding on the bus */
	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_legacy_isr);
2641
2642/*
2643 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
2644 * function.
2645 */
2646void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2647 struct ieee80211_tx_info *info,
2648 __le16 fc, __le32 *tx_flags)
2649{
2650 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2651 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2652 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2653 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2654
2655 if (!ieee80211_is_mgmt(fc))
2656 return;
2657
2658 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2659 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2660 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2661 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2662 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2663 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2664 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2665 break;
2666 }
2667 } else if (info->control.rates[0].flags &
2668 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2669 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2670 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2671 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2672 }
2673}
2674EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644
index 000000000000..f03b463e4378
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -0,0 +1,646 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_core_h__
64#define __iwl_legacy_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "in-tree:"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
82#define TIME_UNIT 1024
83
84#define IWL_SKU_G 0x1
85#define IWL_SKU_A 0x2
86#define IWL_SKU_N 0x8
87
88#define IWL_CMD(x) case x: return #x
89
/*
 * Device-specific host-command hooks for RXON handling: sending the
 * lightweight RXON_ASSOC update, committing a full staging RXON, and
 * (optionally) configuring the RX chain.
 */
struct iwl_hcmd_ops {
	int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
	int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
	/* may be NULL; callers check before invoking */
	void (*set_rxon_chain)(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx);
};
96
/* Device-specific helpers for sizing/building host commands and scans. */
struct iwl_hcmd_utils_ops {
	/* returns the on-wire size of command cmd_id given payload len */
	u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
	/* serializes an ADD_STA command into data; returns bytes written */
	u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
								u8 *data);
	int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
	void (*post_scan)(struct iwl_priv *priv);
};
104
/* Device hooks used by the APM (power management) bring-up path. */
struct iwl_apm_ops {
	int (*init)(struct iwl_priv *priv);
	void (*config)(struct iwl_priv *priv);
};
109
/* debugfs read callbacks for per-device RX/TX/general statistics files. */
struct iwl_debugfs_ops {
	ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);
	ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);
	ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos);
};
118
/* Hook invoked to process a device temperature update. */
struct iwl_temp_ops {
	void (*temperature)(struct iwl_priv *priv);
};
122
/*
 * Core per-device library hooks: hardware parameter setup, TX queue and
 * TFD management, uCode loading/alive handling, error/event-log dumping,
 * channel switch, power, EEPROM, temperature and debugfs support.
 */
struct iwl_lib_ops {
	/* set hw dependent parameters */
	int (*set_hw_params)(struct iwl_priv *priv);
	/* Handling TX */
	void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					u16 byte_cnt);
	int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
				     struct iwl_tx_queue *txq,
				     dma_addr_t addr,
				     u16 len, u8 reset, u8 pad);
	void (*txq_free_tfd)(struct iwl_priv *priv,
			     struct iwl_tx_queue *txq);
	int (*txq_init)(struct iwl_priv *priv,
			struct iwl_tx_queue *txq);
	/* setup Rx handler */
	void (*rx_handler_setup)(struct iwl_priv *priv);
	/* alive notification after init uCode load */
	void (*init_alive_start)(struct iwl_priv *priv);
	/* check validity of rtc data address */
	int (*is_valid_rtc_data_addr)(u32 addr);
	/* 1st ucode load */
	int (*load_ucode)(struct iwl_priv *priv);
	int (*dump_nic_event_log)(struct iwl_priv *priv,
				  bool full_log, char **buf, bool display);
	void (*dump_nic_error_log)(struct iwl_priv *priv);
	int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
	int (*set_channel_switch)(struct iwl_priv *priv,
				  struct ieee80211_channel_switch *ch_switch);
	/* power management */
	struct iwl_apm_ops apm_ops;

	/* power */
	int (*send_tx_power) (struct iwl_priv *priv);
	void (*update_chain_flags)(struct iwl_priv *priv);

	/* eeprom operations (as defined in iwl-eeprom.h) */
	struct iwl_eeprom_ops eeprom_ops;

	/* temperature */
	struct iwl_temp_ops temp_ops;
	/* check for plcp health */
	bool (*check_plcp_health)(struct iwl_priv *priv,
				  struct iwl_rx_packet *pkt);

	struct iwl_debugfs_ops debugfs_ops;

};
171
/* Hook for sending the device-specific LED control host command. */
struct iwl_led_ops {
	int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
};
175
/* mac80211-facing hooks that differ between legacy device families. */
struct iwl_legacy_ops {
	void (*post_associate)(struct iwl_priv *priv);
	void (*config_ap)(struct iwl_priv *priv);
	/* station management */
	int (*update_bcast_stations)(struct iwl_priv *priv);
	/* add (add == true) or remove the IBSS station for vif */
	int (*manage_ibss_station)(struct iwl_priv *priv,
				   struct ieee80211_vif *vif, bool add);
};
184
/* Bundle of all per-device operation tables, referenced from iwl_cfg. */
struct iwl_ops {
	const struct iwl_lib_ops *lib;
	const struct iwl_hcmd_ops *hcmd;
	const struct iwl_hcmd_utils_ops *utils;
	const struct iwl_led_ops *led;
	const struct iwl_nic_ops *nic;
	const struct iwl_legacy_ops *legacy;
	const struct ieee80211_ops *ieee80211_ops;
};
194
/* Module parameters (settable via modprobe options); defaults noted. */
struct iwl_mod_params {
	int sw_crypto;		/* def: 0 = using hardware encryption */
	int disable_hw_scan;	/* def: 0 = use h/w scan */
	int num_of_queues;	/* def: HW dependent */
	int disable_11n;	/* def: 0 = 11n capabilities enabled */
	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
	int antenna;		/* def: 0 = both antennas (use diversity) */
	int restart_fw;		/* def: 1 = restart firmware */
};
204
205/*
206 * @led_compensation: compensate on the led on/off time per HW according
207 * to the deviation to achieve the desired led frequency.
208 * The detail algorithm is described in iwl-led.c
209 * @chain_noise_num_beacons: number of beacons used to compute chain noise
210 * @plcp_delta_threshold: plcp error rate threshold used to trigger
211 * radio tuning when there is a high receiving plcp error rate
212 * @wd_timeout: TX queues watchdog timeout
213 * @temperature_kelvin: temperature report by uCode in kelvin
214 * @max_event_log_size: size of event log buffer size for ucode event logging
215 * @ucode_tracing: support ucode continuous tracing
216 * @sensitivity_calib_by_driver: driver has the capability to perform
217 * sensitivity calibration operation
218 * @chain_noise_calib_by_driver: driver has the capability to perform
219 * chain noise calibration operation
220 */
/* Hardware-family-constant parameters; see kernel-doc comment above. */
struct iwl_base_params {
	int eeprom_size;	/* EEPROM image size in bytes */
	int num_of_queues;	/* def: HW dependent */
	int num_of_ampdu_queues;/* def: HW dependent */
	/* for iwl_legacy_apm_init() */
	u32 pll_cfg_val;
	bool set_l0s;
	bool use_bsm;

	u16 led_compensation;
	int chain_noise_num_beacons;
	u8 plcp_delta_threshold;
	unsigned int wd_timeout;
	bool temperature_kelvin;
	u32 max_event_log_size;
	const bool ucode_tracing;
	const bool sensitivity_calib_by_driver;
	const bool chain_noise_calib_by_driver;
};
240
241/**
242 * struct iwl_cfg
243 * @fw_name_pre: Firmware filename prefix. The api version and extension
244 * (.ucode) will be added to filename before loading from disk. The
245 * filename is constructed as fw_name_pre<api>.ucode.
246 * @ucode_api_max: Highest version of uCode API supported by driver.
247 * @ucode_api_min: Lowest version of uCode API supported by driver.
248 * @scan_antennas: available antenna for scan operation
249 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
250 *
251 * We enable the driver to be backward compatible wrt API version. The
252 * driver specifies which APIs it supports (with @ucode_api_max being the
253 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
254 * it has a supported API version. The firmware's API version will be
255 * stored in @iwl_priv, enabling the driver to make runtime changes based
256 * on firmware version used.
257 *
258 * For example,
259 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
260 * Driver interacts with Firmware API version >= 2.
261 * } else {
262 * Driver interacts with Firmware API version 1.
263 * }
264 *
265 * The ideal usage of this infrastructure is to treat a new ucode API
266 * release as a new hardware revision. That is, through utilizing the
267 * iwl_hcmd_utils_ops etc. we accommodate different command structures
268 * and flows between hardware versions as well as their API
269 * versions.
270 *
271 */
/* Per-device configuration; see the struct iwl_cfg kernel-doc above. */
struct iwl_cfg {
	/* params specific to an individual device within a device family */
	const char *name;
	const char *fw_name_pre;
	const unsigned int ucode_api_max;
	const unsigned int ucode_api_min;
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	unsigned int sku;	/* IWL_SKU_* bits */
	u16 eeprom_ver;
	u16 eeprom_calib_ver;
	const struct iwl_ops *ops;
	/* module based parameters which can be set from modprobe cmd */
	const struct iwl_mod_params *mod_params;
	/* params not likely to change within a device family */
	struct iwl_base_params *base_params;
	/* params likely to change within a device family */
	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
	u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
	enum iwl_led_mode led_mode;
};
293
294/***************************
295 * L i b *
296 ***************************/
297
298struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
299int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
300 const struct ieee80211_tx_queue_params *params);
301int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
302void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
303 struct iwl_rxon_context *ctx,
304 int hw_decrypt);
305int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
306 struct iwl_rxon_context *ctx);
307int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
308 struct iwl_rxon_context *ctx);
309int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
310 struct ieee80211_channel *ch,
311 struct iwl_rxon_context *ctx);
312void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
313 struct iwl_rxon_context *ctx,
314 enum ieee80211_band band,
315 struct ieee80211_vif *vif);
316u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
317 enum ieee80211_band band);
318void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
319 struct iwl_ht_config *ht_conf);
320bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
321 struct iwl_rxon_context *ctx,
322 struct ieee80211_sta_ht_cap *ht_cap);
323void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
324 struct iwl_rxon_context *ctx);
325void iwl_legacy_set_rate(struct iwl_priv *priv);
326int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
327 struct ieee80211_hdr *hdr,
328 u32 decrypt_res,
329 struct ieee80211_rx_status *stats);
330void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
331int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
332 struct ieee80211_vif *vif);
333void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
334 struct ieee80211_vif *vif);
335int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
336 struct ieee80211_vif *vif,
337 enum nl80211_iftype newtype, bool newp2p);
338int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
339void iwl_legacy_txq_mem(struct iwl_priv *priv);
340
341#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
342int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
343void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
344void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
345void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
346 u16 length, struct ieee80211_hdr *header);
347void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
348 u16 length, struct ieee80211_hdr *header);
349const char *iwl_legacy_get_mgmt_string(int cmd);
350const char *iwl_legacy_get_ctrl_string(int cmd);
351void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
352void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
353 u16 len);
354#else
/* No-op stand-ins used when CONFIG_IWLWIFI_LEGACY_DEBUGFS is not set,
 * so callers of the traffic-logging API need no #ifdef wrapping. */
static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
{
	return 0;
}
static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
{
}
static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
{
}
static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
		      u16 length, struct ieee80211_hdr *header)
{
}
static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
		      u16 length, struct ieee80211_hdr *header)
{
}
static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
				    __le16 fc, u16 len)
{
}
377#endif
378/*****************************************************
379 * RX handlers.
380 * **************************************************/
381void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
382 struct iwl_rx_mem_buffer *rxb);
383void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
384 struct iwl_rx_mem_buffer *rxb);
385void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
386 struct iwl_rx_mem_buffer *rxb);
387
388/*****************************************************
389* RX
390******************************************************/
391void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
392void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
393int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
394void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
395 struct iwl_rx_queue *q);
396int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
397void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
398 struct iwl_rx_mem_buffer *rxb);
399/* Handlers */
400void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
401 struct iwl_rx_mem_buffer *rxb);
402void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
403 struct iwl_rx_packet *pkt);
404void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
405void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
406
407/* TX helpers */
408
409/*****************************************************
410* TX
411******************************************************/
412void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
413 struct iwl_tx_queue *txq);
414int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
415 int slots_num, u32 txq_id);
416void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
417 struct iwl_tx_queue *txq,
418 int slots_num, u32 txq_id);
419void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
420void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
421void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
422/*****************************************************
423 * TX power
424 ****************************************************/
425int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
426
427/*******************************************************************************
428 * Rate
429 ******************************************************************************/
430
431u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
432 struct iwl_rxon_context *ctx);
433
434/*******************************************************************************
435 * Scanning
436 ******************************************************************************/
437void iwl_legacy_init_scan_params(struct iwl_priv *priv);
438int iwl_legacy_scan_cancel(struct iwl_priv *priv);
439int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
440void iwl_legacy_force_scan_end(struct iwl_priv *priv);
441int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
442 struct ieee80211_vif *vif,
443 struct cfg80211_scan_request *req);
444void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
445int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
446u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
447 struct ieee80211_mgmt *frame,
448 const u8 *ta, const u8 *ie, int ie_len, int left);
449void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
450u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
451 enum ieee80211_band band,
452 u8 n_probes);
453u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
454 enum ieee80211_band band,
455 struct ieee80211_vif *vif);
456void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
457void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
458
459/* For faster active scanning, scan will move to the next channel if fewer than
460 * PLCP_QUIET_THRESH packets are heard on this channel within
461 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
462 * time if it's a quiet channel (nothing responded to our probe, and there's
463 * no other traffic).
464 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
465#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
466#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
467
468#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
469
470/*****************************************************
471 * S e n d i n g H o s t C o m m a n d s *
472 *****************************************************/
473
474const char *iwl_legacy_get_cmd_string(u8 cmd);
475int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
476 struct iwl_host_cmd *cmd);
477int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
478int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
479 u16 len, const void *data);
480int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
481 const void *data,
482 void (*callback)(struct iwl_priv *priv,
483 struct iwl_device_cmd *cmd,
484 struct iwl_rx_packet *pkt));
485
486int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
487
488
489/*****************************************************
490 * PCI *
491 *****************************************************/
492
493static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
494{
495 int pos;
496 u16 pci_lnk_ctl;
497 pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
498 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
499 return pci_lnk_ctl;
500}
501
502void iwl_legacy_bg_watchdog(unsigned long data);
503u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
504 u32 usec, u32 beacon_interval);
505__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
506 u32 addon, u32 beacon_interval);
507
508#ifdef CONFIG_PM
509int iwl_legacy_pci_suspend(struct device *device);
510int iwl_legacy_pci_resume(struct device *device);
511extern const struct dev_pm_ops iwl_legacy_pm_ops;
512
513#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
514
515#else /* !CONFIG_PM */
516
517#define IWL_LEGACY_PM_OPS NULL
518
519#endif /* !CONFIG_PM */
520
521/*****************************************************
522* Error Handling Debugging
523******************************************************/
524void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
525int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
526 bool full_log, char **buf, bool display);
527#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
528void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
529 struct iwl_rxon_context *ctx);
530#else
/* No-op when CONFIG_IWLWIFI_LEGACY_DEBUG is not set. */
static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
					struct iwl_rxon_context *ctx)
{
}
535#endif
536
537void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
538
539/*****************************************************
540* GEOS
541******************************************************/
542int iwl_legacy_init_geos(struct iwl_priv *priv);
543void iwl_legacy_free_geos(struct iwl_priv *priv);
544
545/*************** DRIVER STATUS FUNCTIONS *****/
546
547#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
548/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
549#define STATUS_INT_ENABLED 2
550#define STATUS_RF_KILL_HW 3
551#define STATUS_CT_KILL 4
552#define STATUS_INIT 5
553#define STATUS_ALIVE 6
554#define STATUS_READY 7
555#define STATUS_TEMPERATURE 8
556#define STATUS_GEO_CONFIGURED 9
557#define STATUS_EXIT_PENDING 10
558#define STATUS_STATISTICS 12
559#define STATUS_SCANNING 13
560#define STATUS_SCAN_ABORTING 14
561#define STATUS_SCAN_HW 15
562#define STATUS_POWER_PMI 16
563#define STATUS_FW_ERROR 17
564
565
566static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
567{
568 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
569 * set but EXIT_PENDING is not */
570 return test_bit(STATUS_READY, &priv->status) &&
571 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
572 !test_bit(STATUS_EXIT_PENDING, &priv->status);
573}
574
575static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
576{
577 return test_bit(STATUS_ALIVE, &priv->status);
578}
579
580static inline int iwl_legacy_is_init(struct iwl_priv *priv)
581{
582 return test_bit(STATUS_INIT, &priv->status);
583}
584
585static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
586{
587 return test_bit(STATUS_RF_KILL_HW, &priv->status);
588}
589
/* RF-kill state: only the hardware switch is tracked here. */
static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
{
	int killed = iwl_legacy_is_rfkill_hw(priv);

	return killed;
}
594
595static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
596{
597 return test_bit(STATUS_CT_KILL, &priv->status);
598}
599
/* Ready for RF operation: adapter ready and not RF-kill switched. */
static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
{
	return iwl_legacy_is_rfkill(priv) ? 0 : iwl_legacy_is_ready(priv);
}
608
609extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
610extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
611 u8 flags, bool clear);
612void iwl_legacy_apm_stop(struct iwl_priv *priv);
613int iwl_legacy_apm_init(struct iwl_priv *priv);
614
615int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
616 struct iwl_rxon_context *ctx);
617static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
618 struct iwl_rxon_context *ctx)
619{
620 return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
621}
622static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
623 struct iwl_rxon_context *ctx)
624{
625 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
626}
627static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
628 struct iwl_priv *priv, enum ieee80211_band band)
629{
630 return priv->hw->wiphy->bands[band];
631}
632
633/* mac80211 handlers */
634int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
635void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
636void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
637 struct ieee80211_vif *vif,
638 struct ieee80211_bss_conf *bss_conf,
639 u32 changes);
640void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
641 struct ieee80211_tx_info *info,
642 __le16 fc, __le32 *tx_flags);
643
644irqreturn_t iwl_legacy_isr(int irq, void *data);
645
646#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644
index 000000000000..668a9616c269
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-csr.h
@@ -0,0 +1,422 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_csr_h__
64#define __iwl_legacy_csr_h__
65/*
66 * CSR (control and status registers)
67 *
68 * CSR registers are mapped directly into PCI bus space, and are accessible
69 * whenever platform supplies power to device, even when device is in
70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_legacy_write_direct32() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers.
79 *
80 * NOTE: Device does need to be awake in order to read this memory
81 * via CSR_EEPROM register
82 */
83#define CSR_BASE (0x000)
84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96
97/*
98 * Hardware revision info
99 * Bit fields:
100 * 31-8: Reserved
101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
107 */
108#define CSR_HW_REV (CSR_BASE+0x028)
109
110/*
111 * EEPROM memory reads
112 *
113 * NOTE: Device must be awake, initialized via apm_ops.init(),
114 * in order to read.
115 */
116#define CSR_EEPROM_REG (CSR_BASE+0x02c)
117#define CSR_EEPROM_GP (CSR_BASE+0x030)
118
119#define CSR_GIO_REG (CSR_BASE+0x03C)
120#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
121#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
122
123/*
124 * UCODE-DRIVER GP (general purpose) mailbox registers.
125 * SET/CLR registers set/clear bit(s) if "1" is written.
126 */
127#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
128#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
129#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
130#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
131
132#define CSR_LED_REG (CSR_BASE+0x094)
133#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
134
135/* GIO Chicken Bits (PCI Express bus link power management) */
136#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
137
138/* Analog phase-lock-loop configuration */
139#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
140
141/*
142 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
143 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
144 * See also CSR_HW_REV register.
145 * Bit fields:
146 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
147 * 1-0: "Dash" (-) value, as in C-1, etc.
148 */
149#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
150
151#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
153
154/* Bits for CSR_HW_IF_CONFIG_REG */
155#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
156#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
157#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
158#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
159
160#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
161#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
162#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
163#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
164#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
165#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
175
176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
177 * acknowledged (reset) by host writing "1" to flagged bits. */
178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
189
190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
191 CSR_INT_BIT_HW_ERR | \
192 CSR_INT_BIT_FH_TX | \
193 CSR_INT_BIT_SW_ERR | \
194 CSR_INT_BIT_RF_KILL | \
195 CSR_INT_BIT_SW_RX | \
196 CSR_INT_BIT_WAKEUP | \
197 CSR_INT_BIT_ALIVE)
198
199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
208
209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
210 CSR39_FH_INT_BIT_RX_CHNL2 | \
211 CSR_FH_INT_BIT_RX_CHNL1 | \
212 CSR_FH_INT_BIT_RX_CHNL0)
213
214
215#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
216 CSR_FH_INT_BIT_TX_CHNL1 | \
217 CSR_FH_INT_BIT_TX_CHNL0)
218
219#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
220 CSR_FH_INT_BIT_RX_CHNL1 | \
221 CSR_FH_INT_BIT_RX_CHNL0)
222
223#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
224 CSR_FH_INT_BIT_TX_CHNL0)
225
226/* GPIO */
227#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
228#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
229#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
230
231/* RESET */
232#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
233#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
234#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
235#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
236#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
237#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
238
239/*
240 * GP (general purpose) CONTROL REGISTER
241 * Bit fields:
242 * 27: HW_RF_KILL_SW
243 * Indicates state of (platform's) hardware RF-Kill switch
244 * 26-24: POWER_SAVE_TYPE
245 * Indicates current power-saving mode:
246 * 000 -- No power saving
247 * 001 -- MAC power-down
248 * 010 -- PHY (radio) power-down
249 * 011 -- Error
250 * 9-6: SYS_CONFIG
251 * Indicates current system configuration, reflecting pins on chip
252 * as forced high/low by device circuit board.
253 * 4: GOING_TO_SLEEP
254 * Indicates MAC is entering a power-saving sleep power-down.
255 * Not a good time to access device-internal resources.
256 * 3: MAC_ACCESS_REQ
257 * Host sets this to request and maintain MAC wakeup, to allow host
258 * access to device-internal resources. Host must wait for
259 * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
260 * device registers.
261 * 2: INIT_DONE
262 * Host sets this to put device into fully operational D0 power mode.
263 * Host resets this after SW_RESET to put device into low power mode.
264 * 0: MAC_CLOCK_READY
265 * Indicates MAC (ucode processor, etc.) is powered up and can run.
266 * Internal resources are accessible.
267 * NOTE: This does not indicate that the processor is actually running.
268 * NOTE: This does not indicate that 4965 or 3945 has completed
269 * init or post-power-down restore of internal SRAM memory.
270 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
271 * SRAM is restored and uCode is in normal operation mode.
272 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
273 * do not need to save/restore it.
274 * NOTE: After device reset, this bit remains "0" until host sets
275 * INIT_DONE
276 */
277#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
278#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
279#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
280#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
281
282#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
283
284#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
285#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
286#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
287
288
289/* EEPROM REG */
290#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
291#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
292#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
293#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
294
295/* EEPROM GP */
296#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
297#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
298#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
299#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
300
301/* GP REG */
302#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
303#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
304#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
305#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
306#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
307
308
309/* CSR GIO */
310#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
311
312/*
313 * UCODE-DRIVER GP (general purpose) mailbox register 1
314 * Host driver and uCode write and/or read this register to communicate with
315 * each other.
316 * Bit fields:
317 * 4: UCODE_DISABLE
318 * Host sets this to request permanent halt of uCode, same as
319 * sending CARD_STATE command with "halt" bit set.
320 * 3: CT_KILL_EXIT
321 * Host sets this to request exit from CT_KILL state, i.e. host thinks
322 * device temperature is low enough to continue normal operation.
323 * 2: CMD_BLOCKED
324 * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
325 * to release uCode to clear all Tx and command queues, enter
326 * unassociated mode, and power down.
327 * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
328 * 1: SW_BIT_RFKILL
329 * Host sets this when issuing CARD_STATE command to request
330 * device sleep.
331 * 0: MAC_SLEEP
332 * uCode sets this when preparing a power-saving power-down.
333 * uCode resets this when power-up is complete and SRAM is sane.
334 * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
335 * and must restore this data after powering back up.
336 * MAC_SLEEP is the best indication that restore is complete.
337 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
338 * do not need to save/restore it.
339 */
340#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
341#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
342#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
343#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
344
345/* GIO Chicken Bits (PCI Express bus link power management) */
346#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
347#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
348
349/* LED */
350#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
351#define CSR_LED_REG_TRUN_ON (0x78)
352#define CSR_LED_REG_TRUN_OFF (0x38)
353
354/* ANA_PLL */
355#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
356
357/* HPET MEM debug */
358#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
359
360/* DRAM INT TABLE */
361#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
362#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
363
364/*
365 * HBUS (Host-side Bus)
366 *
367 * HBUS registers are mapped directly into PCI bus space, but are used
368 * to indirectly access device's internal memory or registers that
369 * may be powered-down.
370 *
371 * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
372 * for these registers;
373 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
374 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
375 * internal resources.
376 *
377 * Do not use iwl_write32()/iwl_read32() family to access these registers;
378 * these provide only simple PCI bus access, without waking up the MAC.
379 */
380#define HBUS_BASE (0x400)
381
382/*
383 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
384 * structures, error log, event log, verifying uCode load).
385 * First write to address register, then read from or write to data register
386 * to complete the job. Once the address register is set up, accesses to
387 * data registers auto-increment the address by one dword.
388 * Bit usage for address registers (read or write):
389 * 0-31: memory address within device
390 */
391#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
392#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
393#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
394#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
395
396/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
397#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
398#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
399
400/*
401 * Registers for accessing device's internal peripheral registers
402 * (e.g. SCD, BSM, etc.). First write to address register,
403 * then read from or write to data register to complete the job.
404 * Bit usage for address registers (read or write):
405 * 0-15: register address (offset) within device
406 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
407 */
408#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
409#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
410#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
411#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
412
413/*
414 * Per-Tx-queue write pointer (index, really!)
415 * Indicates index to next TFD that driver will fill (1 past latest filled).
416 * Bit usage:
417 * 0-7: queue write index
418 * 11-8: queue selector
419 */
420#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
421
422#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644
index 000000000000..ae13112701bf
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debug.h
@@ -0,0 +1,198 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_debug_h__
30#define __iwl_legacy_debug_h__
31
32struct iwl_priv;
33extern u32 iwlegacy_debug_level;
34
35#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
39
40#define iwl_print_hex_error(priv, p, len) \
41do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0)
45
46#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_legacy_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
64do { \
65 if (iwl_legacy_get_debug_level(priv) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0)
69
70#else
71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
/* Debug logging disabled: hex dump is compiled out to a no-op. */
static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
				      const void *p, u32 len)
{}
76#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
77
78#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
79int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
81#else
/* Debugfs support disabled: registration succeeds as a no-op. */
static inline int
iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
{
	return 0;
}
/* Debugfs support disabled: nothing to tear down. */
static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
{
}
90#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
91
92/*
93 * To use the debug system:
94 *
95 * If you are defining a new debug classification, simply add it to the #define
96 * list here in the form of
97 *
98 * #define IWL_DL_xxxx VALUE
99 *
100 * where xxxx should be the name of the classification (for example, WEP).
101 *
102 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
103 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
104 * to send output to that classification.
105 *
106 * The active debug levels can be accessed via files
107 *
108 * /sys/module/iwl4965/parameters/debug{50}
109 * /sys/module/iwl3945/parameters/debug
110 * /sys/class/net/wlan0/device/debug_level
111 *
112 * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
113 */
114
115/* 0x0000000F - 0x00000001 */
116#define IWL_DL_INFO (1 << 0)
117#define IWL_DL_MAC80211 (1 << 1)
118#define IWL_DL_HCMD (1 << 2)
119#define IWL_DL_STATE (1 << 3)
120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
124#define IWL_DL_RADIO (1 << 7)
125/* 0x00000F00 - 0x00000100 */
126#define IWL_DL_POWER (1 << 8)
127#define IWL_DL_TEMP (1 << 9)
128#define IWL_DL_NOTIF (1 << 10)
129#define IWL_DL_SCAN (1 << 11)
130/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC (1 << 12)
132#define IWL_DL_DROP (1 << 13)
133#define IWL_DL_TXPOWER (1 << 14)
134#define IWL_DL_AP (1 << 15)
135/* 0x000F0000 - 0x00010000 */
136#define IWL_DL_FW (1 << 16)
137#define IWL_DL_RF_KILL (1 << 17)
138#define IWL_DL_FW_ERRORS (1 << 18)
139#define IWL_DL_LED (1 << 19)
140/* 0x00F00000 - 0x00100000 */
141#define IWL_DL_RATE (1 << 20)
142#define IWL_DL_CALIB (1 << 21)
143#define IWL_DL_WEP (1 << 22)
144#define IWL_DL_TX (1 << 23)
145/* 0x0F000000 - 0x01000000 */
146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
150/* 0xF0000000 - 0x10000000 */
151#define IWL_DL_11H (1 << 28)
152#define IWL_DL_STATS (1 << 29)
153#define IWL_DL_TX_REPLY (1 << 30)
154#define IWL_DL_QOS (1 << 31)
155
156#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
157#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
158#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
159#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
160#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
161#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
162#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
163#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
164#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
172#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
173#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
181#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
182#define IWL_DEBUG_ASSOC(p, f, a...) \
183 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
185 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
186#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
187#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
188#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
189 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
190#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
191#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
192 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
194#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
195#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
196#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
197
198#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644
index 000000000000..2d32438b4cb8
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
@@ -0,0 +1,1467 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <net/mac80211.h>
30
31
32#include "iwl-dev.h"
33#include "iwl-debug.h"
34#include "iwl-core.h"
35#include "iwl-io.h"
36
37/* create and remove of files */
38#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
39 if (!debugfs_create_file(#name, mode, parent, priv, \
40 &iwl_legacy_dbgfs_##name##_ops)) \
41 goto err; \
42} while (0)
43
44#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
45 struct dentry *__tmp; \
46 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
47 parent, ptr); \
48 if (IS_ERR(__tmp) || !__tmp) \
49 goto err; \
50} while (0)
51
52#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
53 struct dentry *__tmp; \
54 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
55 parent, ptr); \
56 if (IS_ERR(__tmp) || !__tmp) \
57 goto err; \
58} while (0)
59
60/* file operation */
61#define DEBUGFS_READ_FUNC(name) \
62static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
63 char __user *user_buf, \
64 size_t count, loff_t *ppos);
65
66#define DEBUGFS_WRITE_FUNC(name) \
67static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
68 const char __user *user_buf, \
69 size_t count, loff_t *ppos);
70
71
/*
 * Generic debugfs open handler: copy the iwl_priv pointer (stashed in
 * the inode's i_private when the file was created) into
 * file->private_data so the read/write handlers can reach it.
 */
static int
iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
78
79#define DEBUGFS_READ_FILE_OPS(name) \
80 DEBUGFS_READ_FUNC(name); \
81static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
82 .read = iwl_legacy_dbgfs_##name##_read, \
83 .open = iwl_legacy_dbgfs_open_file_generic, \
84 .llseek = generic_file_llseek, \
85};
86
87#define DEBUGFS_WRITE_FILE_OPS(name) \
88 DEBUGFS_WRITE_FUNC(name); \
89static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
90 .write = iwl_legacy_dbgfs_##name##_write, \
91 .open = iwl_legacy_dbgfs_open_file_generic, \
92 .llseek = generic_file_llseek, \
93};
94
95#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
96 DEBUGFS_READ_FUNC(name); \
97 DEBUGFS_WRITE_FUNC(name); \
98static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
99 .write = iwl_legacy_dbgfs_##name##_write, \
100 .read = iwl_legacy_dbgfs_##name##_read, \
101 .open = iwl_legacy_dbgfs_open_file_generic, \
102 .llseek = generic_file_llseek, \
103};
104
105static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
106 char __user *user_buf,
107 size_t count, loff_t *ppos) {
108
109 struct iwl_priv *priv = file->private_data;
110 char *buf;
111 int pos = 0;
112
113 int cnt;
114 ssize_t ret;
115 const size_t bufsz = 100 +
116 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
117 buf = kzalloc(bufsz, GFP_KERNEL);
118 if (!buf)
119 return -ENOMEM;
120 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
121 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
122 pos += scnprintf(buf + pos, bufsz - pos,
123 "\t%25s\t\t: %u\n",
124 iwl_legacy_get_mgmt_string(cnt),
125 priv->tx_stats.mgmt[cnt]);
126 }
127 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
128 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
129 pos += scnprintf(buf + pos, bufsz - pos,
130 "\t%25s\t\t: %u\n",
131 iwl_legacy_get_ctrl_string(cnt),
132 priv->tx_stats.ctrl[cnt]);
133 }
134 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
135 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
136 priv->tx_stats.data_cnt);
137 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
138 priv->tx_stats.data_bytes);
139 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
140 kfree(buf);
141 return ret;
142}
143
/*
 * Debugfs write handler: writing any parseable hex value resets the
 * driver's Tx/Rx traffic counters.
 */
static ssize_t
iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	u32 clear_flag;	/* parsed only to validate the input; value unused */
	char buf[8];
	int buf_size;

	memset(buf, 0, sizeof(buf));
	/* cap the copy so buf keeps a terminating NUL for sscanf() */
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &clear_flag) != 1)
		return -EFAULT;
	iwl_legacy_clear_traffic_stats(priv);

	return count;
}
164
165static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
166 char __user *user_buf,
167 size_t count, loff_t *ppos) {
168
169 struct iwl_priv *priv = file->private_data;
170 char *buf;
171 int pos = 0;
172 int cnt;
173 ssize_t ret;
174 const size_t bufsz = 100 +
175 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
176 buf = kzalloc(bufsz, GFP_KERNEL);
177 if (!buf)
178 return -ENOMEM;
179
180 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
181 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
182 pos += scnprintf(buf + pos, bufsz - pos,
183 "\t%25s\t\t: %u\n",
184 iwl_legacy_get_mgmt_string(cnt),
185 priv->rx_stats.mgmt[cnt]);
186 }
187 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
188 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
189 pos += scnprintf(buf + pos, bufsz - pos,
190 "\t%25s\t\t: %u\n",
191 iwl_legacy_get_ctrl_string(cnt),
192 priv->rx_stats.ctrl[cnt]);
193 }
194 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
195 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
196 priv->rx_stats.data_cnt);
197 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
198 priv->rx_stats.data_bytes);
199
200 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
201 kfree(buf);
202 return ret;
203}
204
205#define BYTE1_MASK 0x000000ff;
206#define BYTE2_MASK 0x0000ffff;
207#define BYTE3_MASK 0x00ffffff;
208static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
209 char __user *user_buf,
210 size_t count, loff_t *ppos)
211{
212 u32 val;
213 char *buf;
214 ssize_t ret;
215 int i;
216 int pos = 0;
217 struct iwl_priv *priv = file->private_data;
218 size_t bufsz;
219
220 /* default is to dump the entire data segment */
221 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
222 priv->dbgfs_sram_offset = 0x800000;
223 if (priv->ucode_type == UCODE_INIT)
224 priv->dbgfs_sram_len = priv->ucode_init_data.len;
225 else
226 priv->dbgfs_sram_len = priv->ucode_data.len;
227 }
228 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
229 buf = kmalloc(bufsz, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
232 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
233 priv->dbgfs_sram_len);
234 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
235 priv->dbgfs_sram_offset);
236 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
237 val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
238 priv->dbgfs_sram_len - i);
239 if (i < 4) {
240 switch (i) {
241 case 1:
242 val &= BYTE1_MASK;
243 break;
244 case 2:
245 val &= BYTE2_MASK;
246 break;
247 case 3:
248 val &= BYTE3_MASK;
249 break;
250 }
251 }
252 if (!(i % 16))
253 pos += scnprintf(buf + pos, bufsz - pos, "\n");
254 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
255 }
256 pos += scnprintf(buf + pos, bufsz - pos, "\n");
257
258 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
259 kfree(buf);
260 return ret;
261}
262
263static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
264 const char __user *user_buf,
265 size_t count, loff_t *ppos)
266{
267 struct iwl_priv *priv = file->private_data;
268 char buf[64];
269 int buf_size;
270 u32 offset, len;
271
272 memset(buf, 0, sizeof(buf));
273 buf_size = min(count, sizeof(buf) - 1);
274 if (copy_from_user(buf, user_buf, buf_size))
275 return -EFAULT;
276
277 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
278 priv->dbgfs_sram_offset = offset;
279 priv->dbgfs_sram_len = len;
280 } else {
281 priv->dbgfs_sram_offset = 0;
282 priv->dbgfs_sram_len = 0;
283 }
284
285 return count;
286}
287
/*
 * debugfs read handler: dump the driver station table.
 *
 * For every in-use station entry prints its MAC address and flag mask,
 * then one line per TID with sequence number, aggregation queue state,
 * block-ack bitmap and rate, flagging TIDs waiting for a block-ack.
 */
static ssize_t
iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_station_entry *station;
	int max_sta = priv->hw_params.max_stations;
	char *buf;
	int i, j, pos = 0;
	ssize_t ret;
	/* Add 30 for initial string */
	const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);

	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
			priv->num_stations);

	for (i = 0; i < max_sta; i++) {
		station = &priv->stations[i];
		/* skip table slots that hold no active station */
		if (!station->used)
			continue;
		pos += scnprintf(buf + pos, bufsz - pos,
				"station %d - addr: %pM, flags: %#x\n",
				i, station->sta.sta.addr,
				station->sta.station_flags_msk);
		pos += scnprintf(buf + pos, bufsz - pos,
				"TID\tseq_num\ttxq_id\tframes\ttfds\t");
		pos += scnprintf(buf + pos, bufsz - pos,
				"start_idx\tbitmap\t\t\trate_n_flags\n");

		for (j = 0; j < MAX_TID_COUNT; j++) {
			pos += scnprintf(buf + pos, bufsz - pos,
				"%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
				j, station->tid[j].seq_number,
				station->tid[j].agg.txq_id,
				station->tid[j].agg.frame_count,
				station->tid[j].tfds_in_queue,
				station->tid[j].agg.start_idx,
				station->tid[j].agg.bitmap,
				station->tid[j].agg.rate_n_flags);

			if (station->tid[j].agg.wait_for_ba)
				pos += scnprintf(buf + pos, bufsz - pos,
						" - waitforba");
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		}

		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
345
/*
 * debugfs read handler: hex dump of the cached EEPROM/OTP image,
 * prefixed by the EEPROM version word.
 *
 * The dump is emitted 16 bytes per line via hex_dump_to_buffer(), so
 * the image size must be a multiple of 16 (-ENODATA otherwise).
 */
static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
				       char __user *user_buf,
				       size_t count,
				       loff_t *ppos)
{
	ssize_t ret;
	struct iwl_priv *priv = file->private_data;
	int pos = 0, ofs = 0, buf_size = 0;
	const u8 *ptr;
	char *buf;
	u16 eeprom_ver;
	size_t eeprom_len = priv->cfg->base_params->eeprom_size;
	buf_size = 4 * eeprom_len + 256;

	if (eeprom_len % 16) {
		IWL_ERR(priv, "NVM size is not multiple of 16.\n");
		return -ENODATA;
	}

	ptr = priv->eeprom;
	if (!ptr) {
		IWL_ERR(priv, "Invalid EEPROM memory\n");
		return -ENOMEM;
	}

	/* 4 characters for byte 0xYY */
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}
	eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
	pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
			"version: 0x%x\n", eeprom_ver);
	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
				   buf_size - pos, 0);
		/* advance past the text hex_dump_to_buffer() just wrote */
		pos += strlen(buf + pos);
		if (buf_size - pos > 0)
			buf[pos++] = '\n';
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
393
394static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
395 char __user *user_buf,
396 size_t count, loff_t *ppos)
397{
398 struct iwl_priv *priv = file->private_data;
399 char *buf;
400 int pos = 0;
401 ssize_t ret = -ENOMEM;
402
403 ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
404 priv, true, &buf, true);
405 if (buf) {
406 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
407 kfree(buf);
408 }
409 return ret;
410}
411
412static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
413 const char __user *user_buf,
414 size_t count, loff_t *ppos)
415{
416 struct iwl_priv *priv = file->private_data;
417 u32 event_log_flag;
418 char buf[8];
419 int buf_size;
420
421 memset(buf, 0, sizeof(buf));
422 buf_size = min(count, sizeof(buf) - 1);
423 if (copy_from_user(buf, user_buf, buf_size))
424 return -EFAULT;
425 if (sscanf(buf, "%d", &event_log_flag) != 1)
426 return -EFAULT;
427 if (event_log_flag == 1)
428 priv->cfg->ops->lib->dump_nic_event_log(priv, true,
429 NULL, false);
430
431 return count;
432}
433
434
435
/*
 * debugfs read handler: list the channels registered with mac80211 for
 * this device, per band, with max TX power and radar/IBSS/passive-scan
 * flags.  Returns -EAGAIN until the channel map is configured.
 */
static ssize_t
iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct ieee80211_channel *channels = NULL;
	const struct ieee80211_supported_band *supp_band = NULL;
	int pos = 0, i, bufsz = PAGE_SIZE;
	char *buf;
	ssize_t ret;

	if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
	if (supp_band) {
		channels = supp_band->channels;

		/* NOTE(review): missing '(' before "802.11bg" is
		 * historical; kept to avoid changing debugfs output */
		pos += scnprintf(buf + pos, bufsz - pos,
				"Displaying %d channels in 2.4GHz band 802.11bg):\n",
				supp_band->n_channels);

		for (i = 0; i < supp_band->n_channels; i++)
			pos += scnprintf(buf + pos, bufsz - pos,
					"%d: %ddBm: BSS%s%s, %s.\n",
					channels[i].hw_value,
					channels[i].max_power,
					channels[i].flags & IEEE80211_CHAN_RADAR ?
					" (IEEE 802.11h required)" : "",
					((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
					|| (channels[i].flags &
					IEEE80211_CHAN_RADAR)) ? "" :
					", IBSS",
					channels[i].flags &
					IEEE80211_CHAN_PASSIVE_SCAN ?
					"passive only" : "active/passive");
	}
	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
	if (supp_band) {
		channels = supp_band->channels;

		pos += scnprintf(buf + pos, bufsz - pos,
				"Displaying %d channels in 5.2GHz band (802.11a)\n",
				supp_band->n_channels);

		for (i = 0; i < supp_band->n_channels; i++)
			pos += scnprintf(buf + pos, bufsz - pos,
					"%d: %ddBm: BSS%s%s, %s.\n",
					channels[i].hw_value,
					channels[i].max_power,
					channels[i].flags & IEEE80211_CHAN_RADAR ?
					" (IEEE 802.11h required)" : "",
					((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
					|| (channels[i].flags &
					IEEE80211_CHAN_RADAR)) ? "" :
					", IBSS",
					channels[i].flags &
					IEEE80211_CHAN_PASSIVE_SCAN ?
					"passive only" : "active/passive");
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
506
/*
 * debugfs read handler: dump the driver status bits, one "NAME: 0/1"
 * line per STATUS_* flag in priv->status.
 */
static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char buf[512];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
		test_bit(STATUS_HCMD_ACTIVE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
		test_bit(STATUS_INT_ENABLED, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
		test_bit(STATUS_RF_KILL_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
		test_bit(STATUS_CT_KILL, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
		test_bit(STATUS_INIT, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
		test_bit(STATUS_ALIVE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
		test_bit(STATUS_READY, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
		test_bit(STATUS_TEMPERATURE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
		test_bit(STATUS_GEO_CONFIGURED, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
		test_bit(STATUS_EXIT_PENDING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
		test_bit(STATUS_STATISTICS, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
		test_bit(STATUS_SCANNING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
		test_bit(STATUS_SCAN_ABORTING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
		test_bit(STATUS_SCAN_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
		test_bit(STATUS_POWER_PMI, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
		test_bit(STATUS_FW_ERROR, &priv->status));
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
550
/*
 * debugfs read handler: dump the accumulated interrupt statistics
 * (priv->isr_stats): HW/SW errors, per-cause counters, and per-command
 * RX handler hit counts for handlers that have fired at least once.
 */
static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		priv->isr_stats.hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		priv->isr_stats.sw);
	if (priv->isr_stats.sw || priv->isr_stats.hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code: 0x%X\n",
			priv->isr_stats.err_code);
	}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	/* scheduler and alive counters are only tracked in debug builds */
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		priv->isr_stats.sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		priv->isr_stats.alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n",
		priv->isr_stats.rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		priv->isr_stats.ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		priv->isr_stats.wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n",
		priv->isr_stats.rx);
	for (cnt = 0; cnt < REPLY_MAX; cnt++) {
		/* only list RX handlers that have actually fired */
		if (priv->isr_stats.rx_handlers[cnt] > 0)
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tRx handler[%36s]:\t\t %u\n",
				iwl_legacy_get_cmd_string(cnt),
				priv->isr_stats.rx_handlers[cnt]);
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		priv->isr_stats.tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		priv->isr_stats.unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
617
618static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
619 const char __user *user_buf,
620 size_t count, loff_t *ppos)
621{
622 struct iwl_priv *priv = file->private_data;
623 char buf[8];
624 int buf_size;
625 u32 reset_flag;
626
627 memset(buf, 0, sizeof(buf));
628 buf_size = min(count, sizeof(buf) - 1);
629 if (copy_from_user(buf, user_buf, buf_size))
630 return -EFAULT;
631 if (sscanf(buf, "%x", &reset_flag) != 1)
632 return -EFAULT;
633 if (reset_flag == 0)
634 iwl_legacy_clear_isr_stats(priv);
635
636 return count;
637}
638
/*
 * debugfs read handler: dump the default QoS (EDCA) parameters per
 * RXON context: cw_min/cw_max/aifsn/txop for each access category.
 */
static ssize_t
iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_rxon_context *ctx;
	int pos = 0, i;
	/* 256 bytes per context is ample for AC_NUM parameter lines */
	char buf[256 * NUM_IWL_RXON_CTX];
	const size_t bufsz = sizeof(buf);

	for_each_context(priv, ctx) {
		pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
				 ctx->ctxid);
		for (i = 0; i < AC_NUM; i++) {
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tcw_min\tcw_max\taifsn\ttxop\n");
			pos += scnprintf(buf + pos, bufsz - pos,
				"AC[%d]\t%u\t%u\t%u\t%u\n", i,
				ctx->qos_data.def_qos_parm.ac[i].cw_min,
				ctx->qos_data.def_qos_parm.ac[i].cw_max,
				ctx->qos_data.def_qos_parm.ac[i].aifsn,
				ctx->qos_data.def_qos_parm.ac[i].edca_txop);
		}
		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
666
667static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
668 const char __user *user_buf,
669 size_t count, loff_t *ppos)
670{
671 struct iwl_priv *priv = file->private_data;
672 char buf[8];
673 int buf_size;
674 int ht40;
675
676 memset(buf, 0, sizeof(buf));
677 buf_size = min(count, sizeof(buf) - 1);
678 if (copy_from_user(buf, user_buf, buf_size))
679 return -EFAULT;
680 if (sscanf(buf, "%d", &ht40) != 1)
681 return -EFAULT;
682 if (!iwl_legacy_is_any_associated(priv))
683 priv->disable_ht40 = ht40 ? true : false;
684 else {
685 IWL_ERR(priv, "Sta associated with AP - "
686 "Change to 40MHz channel support is not allowed\n");
687 return -EINVAL;
688 }
689
690 return count;
691}
692
693static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
694 char __user *user_buf,
695 size_t count, loff_t *ppos)
696{
697 struct iwl_priv *priv = file->private_data;
698 char buf[100];
699 int pos = 0;
700 const size_t bufsz = sizeof(buf);
701
702 pos += scnprintf(buf + pos, bufsz - pos,
703 "11n 40MHz Mode: %s\n",
704 priv->disable_ht40 ? "Disabled" : "Enabled");
705 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
706}
707
/* Instantiate the debugfs file_operations for the handlers above. */
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
717
/*
 * debugfs read handler: dump queue pointers plus the captured TX/RX
 * traffic logs (hex) when the corresponding debug level bits are set.
 *
 * Returns -EAGAIN if the TX queues have not been allocated yet.
 */
static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0, ofs = 0;
	int cnt = 0, entry;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_rx_queue *rxq = &priv->rxq;
	char *buf;
	/* room for both traffic dumps, per-queue lines, and headers */
	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
		(priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
	const u8 *ptr;
	ssize_t ret;

	if (!priv->txq) {
		IWL_ERR(priv, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate buffer\n");
		return -ENOMEM;
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"q[%d]: read_ptr: %u, write_ptr: %u\n",
				cnt, q->read_ptr, q->write_ptr);
	}
	if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
		ptr = priv->tx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			/* 16 bytes of hex per output line */
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			"read: %u, write: %u\n",
			 rxq->read, rxq->write);

	if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
		ptr = priv->rx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
796
797static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
798 const char __user *user_buf,
799 size_t count, loff_t *ppos)
800{
801 struct iwl_priv *priv = file->private_data;
802 char buf[8];
803 int buf_size;
804 int traffic_log;
805
806 memset(buf, 0, sizeof(buf));
807 buf_size = min(count, sizeof(buf) - 1);
808 if (copy_from_user(buf, user_buf, buf_size))
809 return -EFAULT;
810 if (sscanf(buf, "%d", &traffic_log) != 1)
811 return -EFAULT;
812 if (traffic_log == 0)
813 iwl_legacy_reset_traffic_log(priv);
814
815 return count;
816}
817
818static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
819 char __user *user_buf,
820 size_t count, loff_t *ppos) {
821
822 struct iwl_priv *priv = file->private_data;
823 struct iwl_tx_queue *txq;
824 struct iwl_queue *q;
825 char *buf;
826 int pos = 0;
827 int cnt;
828 int ret;
829 const size_t bufsz = sizeof(char) * 64 *
830 priv->cfg->base_params->num_of_queues;
831
832 if (!priv->txq) {
833 IWL_ERR(priv, "txq not ready\n");
834 return -EAGAIN;
835 }
836 buf = kzalloc(bufsz, GFP_KERNEL);
837 if (!buf)
838 return -ENOMEM;
839
840 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
841 txq = &priv->txq[cnt];
842 q = &txq->q;
843 pos += scnprintf(buf + pos, bufsz - pos,
844 "hwq %.2d: read=%u write=%u stop=%d"
845 " swq_id=%#.2x (ac %d/hwq %d)\n",
846 cnt, q->read_ptr, q->write_ptr,
847 !!test_bit(cnt, priv->queue_stopped),
848 txq->swq_id, txq->swq_id & 3,
849 (txq->swq_id >> 2) & 0x1f);
850 if (cnt >= 4)
851 continue;
852 /* for the ACs, display the stop count too */
853 pos += scnprintf(buf + pos, bufsz - pos,
854 " stop-count: %d\n",
855 atomic_read(&priv->queue_stop_count[cnt]));
856 }
857 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
858 kfree(buf);
859 return ret;
860}
861
/*
 * debugfs read handler: RX queue read/write pointers, free buffer
 * count, and the firmware-reported closed receive-buffer index (if the
 * rb status page has been allocated).
 */
static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
				char __user *user_buf,
				size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	struct iwl_rx_queue *rxq = &priv->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		/* only the low 12 bits hold the closed RB index */
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
887
/* debugfs read: delegate to the hardware-specific ucode RX statistics
 * dumper registered in this device's lib ops. */
static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
			user_buf, count, ppos);
}
896
/* debugfs read: delegate to the hardware-specific ucode TX statistics
 * dumper registered in this device's lib ops. */
static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
			user_buf, count, ppos);
}
905
/* debugfs read: delegate to the hardware-specific general ucode
 * statistics dumper registered in this device's lib ops. */
static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
			user_buf, count, ppos);
}
914
/*
 * debugfs read handler: dump the runtime sensitivity-calibration state
 * (priv->sensitivity_data): auto-correlation thresholds, false-alarm
 * and bad-PLCP counters, and the energy/silence tracking arrays.
 */
static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	/* generous upper bound on the formatted size of the struct */
	int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
	ssize_t ret;
	struct iwl_sensitivity_data *data;

	data = &priv->sensitivity_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
			data->auto_corr_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			"auto_corr_ofdm_mrc:\t\t %u\n",
			data->auto_corr_ofdm_mrc);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
			data->auto_corr_ofdm_x1);
	pos += scnprintf(buf + pos, bufsz - pos,
			"auto_corr_ofdm_mrc_x1:\t\t %u\n",
			data->auto_corr_ofdm_mrc_x1);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
			data->auto_corr_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
			data->auto_corr_cck_mrc);
	pos += scnprintf(buf + pos, bufsz - pos,
			"last_bad_plcp_cnt_ofdm:\t\t %u\n",
			data->last_bad_plcp_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
			data->last_fa_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			"last_bad_plcp_cnt_cck:\t\t %u\n",
			data->last_bad_plcp_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
			data->last_fa_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
			data->nrg_curr_state);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
			data->nrg_prev_state);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
	for (cnt = 0; cnt < 10; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->nrg_value[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->nrg_silence_rssi[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
			data->nrg_silence_ref);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
			data->nrg_energy_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
			data->nrg_silence_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
			data->nrg_th_cck);
	pos += scnprintf(buf + pos, bufsz - pos,
			"nrg_auto_corr_silence_diff:\t %u\n",
			data->nrg_auto_corr_silence_diff);
	pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
			data->num_in_cck_no_fa);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
			data->nrg_th_ofdm);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
994
995
/*
 * debugfs read handler: dump the chain-noise calibration state
 * (priv->chain_noise_data): per-chain noise and signal measurements,
 * disconnect flags, delta gain codes, and calibration progress.
 */
static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	/* generous upper bound on the formatted size of the struct */
	int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
	ssize_t ret;
	struct iwl_chain_noise_data *data;

	data = &priv->chain_noise_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
			data->active_chains);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
			data->chain_noise_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
			data->chain_noise_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
			data->chain_noise_c);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
			data->chain_signal_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
			data->chain_signal_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
			data->chain_signal_c);
	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
			data->beacon_count);

	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->disconn_array[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->delta_gain_code[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
			data->radio_write);
	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
			data->state);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1053
1054static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
1055 char __user *user_buf,
1056 size_t count, loff_t *ppos)
1057{
1058 struct iwl_priv *priv = file->private_data;
1059 char buf[60];
1060 int pos = 0;
1061 const size_t bufsz = sizeof(buf);
1062 u32 pwrsave_status;
1063
1064 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1065 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1066
1067 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1068 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1069 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1070 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1071 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1072 "error");
1073
1074 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1075}
1076
/*
 * debugfs write handler: request a synchronous statistics refresh from
 * uCode (with the "clear" flag set in the request).
 *
 * NOTE(review): the written value is parsed only to validate the
 * input; 'clear' itself is never consulted and the request is always
 * issued once parsing succeeds.
 */
static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int clear;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &clear) != 1)
		return -EFAULT;

	/* make request to uCode to retrieve statistics information */
	mutex_lock(&priv->mutex);
	iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
	mutex_unlock(&priv->mutex);

	return count;
}
1100
/*
 * debugfs read handler: report whether the periodic uCode trace timer
 * is running, plus the event-log wrap counters.
 */
static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char buf[128];
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
			priv->event_log.ucode_trace ? "On" : "Off");
	pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
			priv->event_log.non_wraps_count);
	pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
			priv->event_log.wraps_once_count);
	pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
			priv->event_log.wraps_more_count);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
1121
/*
 * debugfs write handler: non-zero starts the periodic uCode trace
 * timer; zero stops it (synchronously waiting for a running timer).
 */
static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int trace;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &trace) != 1)
		return -EFAULT;

	if (trace) {
		priv->event_log.ucode_trace = true;
		/* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
		mod_timer(&priv->ucode_trace,
			jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
	} else {
		priv->event_log.ucode_trace = false;
		del_timer_sync(&priv->ucode_trace);
	}

	return count;
}
1150
1151static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
1152 char __user *user_buf,
1153 size_t count, loff_t *ppos) {
1154
1155 struct iwl_priv *priv = file->private_data;
1156 int len = 0;
1157 char buf[20];
1158
1159 len = sprintf(buf, "0x%04X\n",
1160 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1161 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1162}
1163
1164static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
1165 char __user *user_buf,
1166 size_t count, loff_t *ppos) {
1167
1168 struct iwl_priv *priv = file->private_data;
1169 int len = 0;
1170 char buf[20];
1171
1172 len = sprintf(buf, "0x%04X\n",
1173 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1174 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1175}
1176
1177static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
1178 char __user *user_buf,
1179 size_t count, loff_t *ppos)
1180{
1181 struct iwl_priv *priv = file->private_data;
1182 char *buf;
1183 int pos = 0;
1184 ssize_t ret = -EFAULT;
1185
1186 if (priv->cfg->ops->lib->dump_fh) {
1187 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
1188 if (buf) {
1189 ret = simple_read_from_buffer(user_buf,
1190 count, ppos, buf, pos);
1191 kfree(buf);
1192 }
1193 }
1194
1195 return ret;
1196}
1197
1198static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
1199 char __user *user_buf,
1200 size_t count, loff_t *ppos) {
1201
1202 struct iwl_priv *priv = file->private_data;
1203 int pos = 0;
1204 char buf[12];
1205 const size_t bufsz = sizeof(buf);
1206
1207 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1208 priv->missed_beacon_threshold);
1209
1210 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1211}
1212
1213static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
1214 const char __user *user_buf,
1215 size_t count, loff_t *ppos)
1216{
1217 struct iwl_priv *priv = file->private_data;
1218 char buf[8];
1219 int buf_size;
1220 int missed;
1221
1222 memset(buf, 0, sizeof(buf));
1223 buf_size = min(count, sizeof(buf) - 1);
1224 if (copy_from_user(buf, user_buf, buf_size))
1225 return -EFAULT;
1226 if (sscanf(buf, "%d", &missed) != 1)
1227 return -EINVAL;
1228
1229 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1230 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1231 priv->missed_beacon_threshold =
1232 IWL_MISSED_BEACON_THRESHOLD_DEF;
1233 else
1234 priv->missed_beacon_threshold = missed;
1235
1236 return count;
1237}
1238
1239static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
1240 char __user *user_buf,
1241 size_t count, loff_t *ppos) {
1242
1243 struct iwl_priv *priv = file->private_data;
1244 int pos = 0;
1245 char buf[12];
1246 const size_t bufsz = sizeof(buf);
1247
1248 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
1249 priv->cfg->base_params->plcp_delta_threshold);
1250
1251 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1252}
1253
1254static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
1255 const char __user *user_buf,
1256 size_t count, loff_t *ppos) {
1257
1258 struct iwl_priv *priv = file->private_data;
1259 char buf[8];
1260 int buf_size;
1261 int plcp;
1262
1263 memset(buf, 0, sizeof(buf));
1264 buf_size = min(count, sizeof(buf) - 1);
1265 if (copy_from_user(buf, user_buf, buf_size))
1266 return -EFAULT;
1267 if (sscanf(buf, "%d", &plcp) != 1)
1268 return -EINVAL;
1269 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
1270 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
1271 priv->cfg->base_params->plcp_delta_threshold =
1272 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
1273 else
1274 priv->cfg->base_params->plcp_delta_threshold = plcp;
1275 return count;
1276}
1277
1278static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
1279 char __user *user_buf,
1280 size_t count, loff_t *ppos) {
1281
1282 struct iwl_priv *priv = file->private_data;
1283 int i, pos = 0;
1284 char buf[300];
1285 const size_t bufsz = sizeof(buf);
1286 struct iwl_force_reset *force_reset;
1287
1288 for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
1289 force_reset = &priv->force_reset[i];
1290 pos += scnprintf(buf + pos, bufsz - pos,
1291 "Force reset method %d\n", i);
1292 pos += scnprintf(buf + pos, bufsz - pos,
1293 "\tnumber of reset request: %d\n",
1294 force_reset->reset_request_count);
1295 pos += scnprintf(buf + pos, bufsz - pos,
1296 "\tnumber of reset request success: %d\n",
1297 force_reset->reset_success_count);
1298 pos += scnprintf(buf + pos, bufsz - pos,
1299 "\tnumber of reset request reject: %d\n",
1300 force_reset->reset_reject_count);
1301 pos += scnprintf(buf + pos, bufsz - pos,
1302 "\treset duration: %lu\n",
1303 force_reset->reset_duration);
1304 }
1305 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1306}
1307
1308static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
1309 const char __user *user_buf,
1310 size_t count, loff_t *ppos) {
1311
1312 struct iwl_priv *priv = file->private_data;
1313 char buf[8];
1314 int buf_size;
1315 int reset, ret;
1316
1317 memset(buf, 0, sizeof(buf));
1318 buf_size = min(count, sizeof(buf) - 1);
1319 if (copy_from_user(buf, user_buf, buf_size))
1320 return -EFAULT;
1321 if (sscanf(buf, "%d", &reset) != 1)
1322 return -EINVAL;
1323 switch (reset) {
1324 case IWL_RF_RESET:
1325 case IWL_FW_RESET:
1326 ret = iwl_legacy_force_reset(priv, reset, true);
1327 break;
1328 default:
1329 return -EINVAL;
1330 }
1331 return ret ? ret : count;
1332}
1333
1334static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
1335 const char __user *user_buf,
1336 size_t count, loff_t *ppos) {
1337
1338 struct iwl_priv *priv = file->private_data;
1339 char buf[8];
1340 int buf_size;
1341 int timeout;
1342
1343 memset(buf, 0, sizeof(buf));
1344 buf_size = min(count, sizeof(buf) - 1);
1345 if (copy_from_user(buf, user_buf, buf_size))
1346 return -EFAULT;
1347 if (sscanf(buf, "%d", &timeout) != 1)
1348 return -EINVAL;
1349 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
1350 timeout = IWL_DEF_WD_TIMEOUT;
1351
1352 priv->cfg->base_params->wd_timeout = timeout;
1353 iwl_legacy_setup_watchdog(priv);
1354 return count;
1355}
1356
/*
 * Instantiate the struct file_operations for each debugfs entry; the
 * macros wire <name> to the iwl_legacy_dbgfs_<name>_read/_write
 * handlers defined above.
 */
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1378
1379/*
1380 * Create the debugfs files and directories
1381 *
1382 */
1383int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
1384{
1385 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
1386 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1387
1388 dir_drv = debugfs_create_dir(name, phyd);
1389 if (!dir_drv)
1390 return -ENOMEM;
1391
1392 priv->debugfs_dir = dir_drv;
1393
1394 dir_data = debugfs_create_dir("data", dir_drv);
1395 if (!dir_data)
1396 goto err;
1397 dir_rf = debugfs_create_dir("rf", dir_drv);
1398 if (!dir_rf)
1399 goto err;
1400 dir_debug = debugfs_create_dir("debug", dir_drv);
1401 if (!dir_debug)
1402 goto err;
1403
1404 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1405 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1406 DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
1407 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1408 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1409 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1410 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1411 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1412 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1413 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
1414 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
1415 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1416 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1417 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1418 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1419 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
1420 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
1421 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1422 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1423 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
1424 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1425 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1426 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1427 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1428
1429 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1430 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1431 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1432 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1433 if (priv->cfg->base_params->ucode_tracing)
1434 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1435 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1436 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1437 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1438 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1439 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1440 &priv->disable_sens_cal);
1441 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1442 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1443 &priv->disable_chain_noise_cal);
1444 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1445 &priv->disable_tx_power_cal);
1446 return 0;
1447
1448err:
1449 IWL_ERR(priv, "Can't create the debugfs directory\n");
1450 iwl_legacy_dbgfs_unregister(priv);
1451 return -ENOMEM;
1452}
1453EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
1454
1455/**
1456 * Remove the debugfs files and directories
1457 *
1458 */
1459void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
1460{
1461 if (!priv->debugfs_dir)
1462 return;
1463
1464 debugfs_remove_recursive(priv->debugfs_dir);
1465 priv->debugfs_dir = NULL;
1466}
1467EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644
index 000000000000..9ee849d669f3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -0,0 +1,1426 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_legacy_dev_h__
33#define __iwl_legacy_dev_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <linux/leds.h>
38#include <linux/wait.h>
39#include <net/ieee80211_radiotap.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-debug.h"
46#include "iwl-4965-hw.h"
47#include "iwl-3945-hw.h"
48#include "iwl-led.h"
49#include "iwl-power.h"
50#include "iwl-legacy-rs.h"
51
/* forward declaration; full definition appears later in this header */
struct iwl_tx_queue;

/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */

/* Default noise level to report when noise measurement is not available.
 * This may be because we're:
 * 1) Not associated (4965, no beacon statistics being sent to driver)
 * 2) Scanning (noise measurement does not apply to associated channel)
 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
 * Use default noise value of -127 ... this is below the range of measurable
 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
 * Also, -127 works better than 0 when averaging frames with/without
 * noise info (e.g. averaging might be done in app); measured dBm values are
 * always negative ... using a negative value as the default keeps all
 * averages within an s8's (used in some apps) range of negative values. */
#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)

/*
 * RTS threshold here is total size [2347] minus 4 FCS bytes
 * Per spec:
 * a value of 0 means RTS on all data/management packets
 * a value > max MSDU size means no RTS
 * else RTS for data/management frames where MPDU is larger
 * than RTS value.
 */
#define DEFAULT_RTS_THRESHOLD 2347U
#define MIN_RTS_THRESHOLD 0U
#define MAX_RTS_THRESHOLD 2347U
#define MAX_MSDU_SIZE 2304U
#define MAX_MPDU_SIZE 2346U
#define DEFAULT_BEACON_INTERVAL 100U
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
86
87struct iwl_rx_mem_buffer {
88 dma_addr_t page_dma;
89 struct page *page;
90 struct list_head list;
91};
92
93#define rxb_addr(r) page_address(r->page)
94
95/* defined below */
96struct iwl_device_cmd;
97
/*
 * Per-command bookkeeping stored alongside each entry of the command
 * queue: completion plumbing plus DMA unmap info for the buffer.
 */
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	/*
	 * only for ASYNC commands
	 * (which is somewhat stupid -- look at iwl-sta.c for instance
	 * which duplicates a bunch of code because the callback isn't
	 * invoked for SYNC commands, if it were and its result passed
	 * through it would be simpler...)
	 */
	void (*callback)(struct iwl_priv *priv,
			 struct iwl_device_cmd *cmd,
			 struct iwl_rx_packet *pkt);

	/* The CMD_SIZE_HUGE flag bit indicates that the command
	 * structure is stored at the end of the shared queue memory. */
	u32 flags;

	/* DMA unmap bookkeeping for the command buffer */
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};
119
120/*
121 * Generic queue structure
122 *
123 * Contains common data for Rx and Tx queues
124 */
125struct iwl_queue {
126 int n_bd; /* number of BDs in this queue */
127 int write_ptr; /* 1-st empty entry (index) host_w*/
128 int read_ptr; /* last used entry (index) host_r*/
129 /* use for monitoring and recovering the stuck queue */
130 dma_addr_t dma_addr; /* physical addr for BD's */
131 int n_window; /* safe queue window */
132 u32 id;
133 int low_mark; /* low watermark, resume queue if free
134 * space more than this */
135 int high_mark; /* high watermark, stop queue if free
136 * space less than this */
137} __packed;
138
139/* One for each TFD */
140struct iwl_tx_info {
141 struct sk_buff *skb;
142 struct iwl_rxon_context *ctx;
143};
144
145/**
146 * struct iwl_tx_queue - Tx Queue for DMA
147 * @q: generic Rx/Tx queue descriptor
148 * @bd: base of circular buffer of TFDs
149 * @cmd: array of command/TX buffer pointers
150 * @meta: array of meta data for each command/tx buffer
151 * @dma_addr_cmd: physical address of cmd/tx buffer array
152 * @txb: array of per-TFD driver data
153 * @time_stamp: time (in jiffies) of last read_ptr change
154 * @need_update: indicates need to update read/write index
155 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
156 *
157 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
158 * descriptors) and required locking structures.
159 */
160#define TFD_TX_CMD_SLOTS 256
161#define TFD_CMD_SLOTS 32
162
163struct iwl_tx_queue {
164 struct iwl_queue q;
165 void *tfds;
166 struct iwl_device_cmd **cmd;
167 struct iwl_cmd_meta *meta;
168 struct iwl_tx_info *txb;
169 unsigned long time_stamp;
170 u8 need_update;
171 u8 sched_retry;
172 u8 active;
173 u8 swq_id;
174};
175
#define IWL_NUM_SCAN_RATES         (2)

/* regulatory channel data from EEPROM (4965) */
struct iwl4965_channel_tgd_info {
	u8 type;
	s8 max_power;
};

struct iwl4965_channel_tgh_info {
	s64 last_radar_time;
};

#define IWL4965_MAX_RATE (33)

struct iwl3945_clip_group {
	/* maximum power level to prevent clipping for each rate, derived by
	 * us from this band's saturation power in EEPROM */
	const s8 clip_powers[IWL_MAX_RATES];
};

/* current Tx power values to use, one for each rate for each channel.
 * requested power is limited by:
 * -- regulatory EEPROM limits for this channel
 * -- hardware capabilities (clip-powers)
 * -- spectrum management
 * -- user preference (e.g. iwconfig)
 * when requested power is set, base power index must also be set. */
struct iwl3945_channel_power_info {
	struct iwl3945_tx_power tpc;	/* actual radio and DSP gain settings */
	s8 power_table_index;	/* actual (compensated) index into gain table */
	s8 base_power_index;	/* gain index for power at factory temp. */
	s8 requested_power;	/* power (dBm) requested for this chnl/rate */
};

/* current scan Tx power values to use, one for each scan rate for each
 * channel. */
struct iwl3945_scan_power_info {
	struct iwl3945_tx_power tpc;	/* actual radio and DSP gain settings */
	s8 power_table_index;	/* actual (compensated) index into gain table */
	s8 requested_power;	/* scan pwr (dBm) requested for chnl/rate */
};
216
217/*
218 * One for each channel, holds all channel setup data
219 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
220 * with one another!
221 */
222struct iwl_channel_info {
223 struct iwl4965_channel_tgd_info tgd;
224 struct iwl4965_channel_tgh_info tgh;
225 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
226 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
227 * HT40 channel */
228
229 u8 channel; /* channel number */
230 u8 flags; /* flags copied from EEPROM */
231 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
232 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
233 s8 min_power; /* always 0 */
234 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
235
236 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
237 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
238 enum ieee80211_band band;
239
240 /* HT40 channel info */
241 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
242 u8 ht40_flags; /* flags copied from EEPROM */
243 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
244
245 /* Radio/DSP gain settings for each "normal" data Tx rate.
246 * These include, in addition to RF and DSP gain, a few fields for
247 * remembering/modifying gain settings (indexes). */
248 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
249
250 /* Radio/DSP gain settings for each scan rate, for directed scans. */
251 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
252};
253
/* TX DMA/FIFO channel assignments, one per 802.11 access category */
#define IWL_TX_FIFO_BK		0	/* shared */
#define IWL_TX_FIFO_BE		1
#define IWL_TX_FIFO_VI		2	/* shared */
#define IWL_TX_FIFO_VO		3
#define IWL_TX_FIFO_UNUSED	-1

/* Minimum number of queues. MAX_NUM is defined in hw specific files.
 * Set the minimum to accommodate the 4 standard TX queues, 1 command
 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
#define IWL_MIN_NUM_QUEUES	10

#define IWL_DEFAULT_CMD_QUEUE_NUM	4

/* 802.11 frame size limits used to size the raw buffer below */
#define IEEE80211_DATA_LEN		2304
#define IEEE80211_4ADDR_LEN		30
#define IEEE80211_HLEN			(IEEE80211_4ADDR_LEN)
#define IEEE80211_FRAME_LEN		(IEEE80211_DATA_LEN + IEEE80211_HLEN)

/* Host-side container for a frame handed to the device (e.g. a beacon) */
struct iwl_frame {
	union {
		struct ieee80211_hdr frame;
		struct iwl_tx_beacon_cmd beacon;
		u8 raw[IEEE80211_FRAME_LEN];
		u8 cmd[360];
	} u;
	struct list_head list;
};

/* convert between the 12-bit 802.11 sequence number and the raw
 * sequence-control field (low 4 bits are the fragment number) */
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)

/* host-command submission flags (see struct iwl_host_cmd.flags) */
enum {
	CMD_SYNC = 0,
	CMD_SIZE_NORMAL = 0,
	CMD_NO_SKB = 0,
	CMD_SIZE_HUGE = (1 << 0),
	CMD_ASYNC = (1 << 1),
	CMD_WANT_SKB = (1 << 2),
};
294
#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for a scan command
 * (which is relatively huge; space is allocated separately).
 */
struct iwl_device_cmd {
	struct iwl_cmd_header hdr;	/* uCode API */
	union {
		u32 flags;
		u8 val8;
		u16 val16;
		u32 val32;
		struct iwl_tx_cmd tx;
		u8 payload[DEF_CMD_PAYLOAD_SIZE];
	} __packed cmd;
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))


/* Host-side description of a command to send to the uCode */
struct iwl_host_cmd {
	const void *data;		/* payload to copy into the cmd */
	unsigned long reply_page;	/* reply buffer (if CMD_WANT_SKB) */
	void (*callback)(struct iwl_priv *priv,
			 struct iwl_device_cmd *cmd,
			 struct iwl_rx_packet *pkt);
	u32 flags;			/* CMD_* flags from the enum above */
	u16 len;			/* length of @data */
	u8 id;				/* command ID (REPLY_*) */
};
329
#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
#define SUP_RATE_11G_MAX_NUM_CHANNELS  12

/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;	/* last write index actually told to the HW */
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;	/* protects the queue indices and lists */
};
365
#define IWL_SUPPORTED_RATES_IE_LEN	8

#define MAX_TID_COUNT	9

#define IWL_INVALID_RATE     0xFF
#define IWL_INVALID_VALUE    -1

/**
 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
 * @txq_id: Tx queue used for Tx attempt
 * @frame_count: # frames attempted by Tx command
 * @wait_for_ba: Expect block-ack before next Tx reply
 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
 * @bitmap: one bit for each frame pending ACK in the Tx window
 * @rate_n_flags: Rate at which Tx was attempted
 * @state: aggregation state machine (IWL_AGG_OFF/ON/IWL_EMPTYING_*)
 *
 * If REPLY_TX indicates that aggregation was attempted, driver must wait
 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
 * until block ack arrives.
 */
struct iwl_ht_agg {
	u16 txq_id;
	u16 frame_count;
	u16 wait_for_ba;
	u16 start_idx;
	u64 bitmap;
	u32 rate_n_flags;
#define IWL_AGG_OFF 0
#define IWL_AGG_ON 1
#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
#define IWL_EMPTYING_HW_QUEUE_DELBA 3
	u8 state;
};


/* per-TID state for a station */
struct iwl_tid_data {
	u16 seq_number; /* 4965 only */
	u16 tfds_in_queue;
	struct iwl_ht_agg agg;
};
407
/* hardware crypto key material for a station */
struct iwl_hw_key {
	u32 cipher;	/* WLAN_CIPHER_SUITE_* value */
	int keylen;
	u8 keyidx;
	u8 key[32];
};

/* HT supported-rates field, viewable as raw u16 or per-stream bytes */
union iwl_ht_rate_supp {
	u16 rates;
	struct {
		u8 siso_rate;
		u8 mimo_rate;
	};
};
422
/* maximum RX A-MPDU length exponent (3-bit HT capability field) */
#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K

/*
 * Maximal MPDU density for TX aggregation
 * 4 - 2us density
 * 5 - 4us density
 * 6 - 8us density
 * 7 - 16us density
 */
#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
#define CFG_HT_MPDU_DENSITY_MIN     (0x1)

/* driver-level HT configuration */
struct iwl_ht_config {
	bool single_chain_sufficient;
	enum ieee80211_smps_mode smps; /* current smps mode */
};

/* QoS structures */
struct iwl_qos_info {
	int qos_active;
	struct iwl_qosparam_cmd def_qos_parm;
};
456
457/*
458 * Structure should be accessed with sta_lock held. When station addition
459 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
460 * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
461 * sta_lock held.
462 */
463struct iwl_station_entry {
464 struct iwl_legacy_addsta_cmd sta;
465 struct iwl_tid_data tid[MAX_TID_COUNT];
466 u8 used, ctxid;
467 struct iwl_hw_key keyinfo;
468 struct iwl_link_quality_cmd *lq;
469};
470
471struct iwl_station_priv_common {
472 struct iwl_rxon_context *ctx;
473 u8 sta_id;
474};
475
476/*
477 * iwl_station_priv: Driver's private station information
478 *
479 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
480 * in the structure for use by driver. This structure is places in that
481 * space.
482 *
483 * The common struct MUST be first because it is shared between
484 * 3945 and 4965!
485 */
486struct iwl_station_priv {
487 struct iwl_station_priv_common common;
488 struct iwl_lq_sta lq_sta;
489 atomic_t pending_frames;
490 bool client;
491 bool asleep;
492};
493
494/**
495 * struct iwl_vif_priv - driver's private per-interface information
496 *
497 * When mac80211 allocates a virtual interface, it can allocate
498 * space for us to put data into.
499 */
500struct iwl_vif_priv {
501 struct iwl_rxon_context *ctx;
502 u8 ibss_bssid_sta_id;
503};
504
505/* one for each uCode image (inst/data, boot/init/runtime) */
506struct fw_desc {
507 void *v_addr; /* access by driver */
508 dma_addr_t p_addr; /* access by card's busmaster DMA */
509 u32 len; /* bytes */
510};
511
/* uCode file layout */
struct iwl_ucode_header {
	__le32 ver;	/* major/minor/API/serial */
	struct {
		__le32 inst_size;	/* bytes of runtime code */
		__le32 data_size;	/* bytes of runtime data */
		__le32 init_size;	/* bytes of init code */
		__le32 init_data_size;	/* bytes of init data */
		__le32 boot_size;	/* bytes of bootstrap code */
		/* zero-length array predates C99 flexible members;
		 * layout must match the firmware file format */
		u8 data[0];		/* in same order as sizes */
	} v1;
};

/* per-sender IBSS sequence/fragment tracking entry */
struct iwl4965_ibss_seq {
	u8 mac[ETH_ALEN];
	u16 seq_num;
	u16 frag_num;
	unsigned long packet_time;	/* jiffies of last packet */
	struct list_head list;
};
532
/* per-hardware bounds for the runtime sensitivity calibration */
struct iwl_sensitivity_ranges {
	u16 min_nrg_cck;
	u16 max_nrg_cck;

	u16 nrg_th_cck;
	u16 nrg_th_ofdm;

	u16 auto_corr_min_ofdm;
	u16 auto_corr_min_ofdm_mrc;
	u16 auto_corr_min_ofdm_x1;
	u16 auto_corr_min_ofdm_mrc_x1;

	u16 auto_corr_max_ofdm;
	u16 auto_corr_max_ofdm_mrc;
	u16 auto_corr_max_ofdm_x1;
	u16 auto_corr_max_ofdm_mrc_x1;

	u16 auto_corr_max_cck;
	u16 auto_corr_max_cck_mrc;
	u16 auto_corr_min_cck;
	u16 auto_corr_min_cck_mrc;

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};


/* temperature unit conversion (device reports Kelvin) */
#define KELVIN_TO_CELSIUS(x) ((x)-273)
#define CELSIUS_TO_KELVIN(x) ((x)+273)
563
564
565/**
566 * struct iwl_hw_params
567 * @max_txq_num: Max # Tx queues supported
568 * @dma_chnl_num: Number of Tx DMA/FIFO channels
569 * @scd_bc_tbls_size: size of scheduler byte count tables
570 * @tfd_size: TFD size
571 * @tx/rx_chains_num: Number of TX/RX chains
572 * @valid_tx/rx_ant: usable antennas
573 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
574 * @max_rxq_log: Log-base-2 of max_rxq_size
575 * @rx_page_order: Rx buffer page order
576 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
577 * @max_stations:
578 * @ht40_channel: is 40MHz width possible in band 2.4
579 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
580 * @sw_crypto: 0 for hw, 1 for sw
581 * @max_xxx_size: for ucode uses
582 * @ct_kill_threshold: temperature threshold
583 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
584 * @struct iwl_sensitivity_ranges: range of sensitivity values
585 */
586struct iwl_hw_params {
587 u8 max_txq_num;
588 u8 dma_chnl_num;
589 u16 scd_bc_tbls_size;
590 u32 tfd_size;
591 u8 tx_chains_num;
592 u8 rx_chains_num;
593 u8 valid_tx_ant;
594 u8 valid_rx_ant;
595 u16 max_rxq_size;
596 u16 max_rxq_log;
597 u32 rx_page_order;
598 u32 rx_wrt_ptr_reg;
599 u8 max_stations;
600 u8 ht40_channel;
601 u8 max_beacon_itrvl; /* in 1024 ms */
602 u32 max_inst_size;
603 u32 max_data_size;
604 u32 max_bsm_size;
605 u32 ct_kill_threshold; /* value in hw-dependent units */
606 u16 beacon_time_tsf_bits;
607 const struct iwl_sensitivity_ranges *sens;
608};
609
610
611/******************************************************************************
612 *
613 * Functions implemented in core module which are forward declared here
614 * for use by iwl-[4-5].c
615 *
616 * NOTE: The implementation of these functions are not hardware specific
617 * which is why they are in the core module files.
618 *
619 * Naming convention --
620 * iwl_ <-- Is part of iwlwifi
621 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
622 * iwl4965_bg_ <-- Called from work queue context
623 * iwl4965_mac_ <-- mac80211 callback
624 *
625 ****************************************************************************/
626extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
627extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
628extern int iwl_legacy_queue_space(const struct iwl_queue *q);
629static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
630{
631 return q->write_ptr >= q->read_ptr ?
632 (i >= q->read_ptr && i < q->write_ptr) :
633 !(i < q->read_ptr && i >= q->write_ptr);
634}
635
636
637static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
638 int is_huge)
639{
640 /*
641 * This is for init calibration result and scan command which
642 * required buffer > TFD_MAX_PAYLOAD_SIZE,
643 * the big buffer at end of command array
644 */
645 if (is_huge)
646 return q->n_window; /* must be power of 2 */
647
648 /* Otherwise, use normal size buffers */
649 return index & (q->n_window - 1);
650}
651
652
/* coherent DMA allocation: CPU address, bus address and size */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};
658
/* HT operation modes */
#define IWL_OPERATION_MODE_AUTO     0
#define IWL_OPERATION_MODE_HT_ONLY  1
#define IWL_OPERATION_MODE_MIXED    2
#define IWL_OPERATION_MODE_20MHZ    3

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000

/* Sensitivity and chain noise calibration */
#define INITIALIZATION_VALUE		0xFFFF
#define IWL4965_CAL_NUM_BEACONS		20
#define IWL_CAL_NUM_BEACONS		16
#define MAXIMUM_ALLOWED_PATHLOSS	15

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3

/* false-alarm rate bounds used to steer the auto-correlation thresholds */
#define MAX_FA_OFDM  50
#define MIN_FA_OFDM  5
#define MAX_FA_CCK   50
#define MIN_FA_CCK   5

#define AUTO_CORR_STEP_OFDM       1

#define AUTO_CORR_STEP_CCK     3
#define AUTO_CORR_MAX_TH_CCK   160

#define NRG_DIFF               2
#define NRG_STEP_CCK           2
#define NRG_MARGIN             8
#define MAX_NUMBER_CCK_NO_FA 100

#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)

/* receive-chain indices for chain-noise calibration */
#define CHAIN_A             0
#define CHAIN_B             1
#define CHAIN_C             2
#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
#define ALL_BAND_FILTER			0xFF00
#define IN_BAND_FILTER			0xFF
#define MIN_AVERAGE_NOISE_MAX_VALUE	0xFFFFFFFF

#define NRG_NUM_PREV_STAT_L     20
#define NUM_RX_CHAINS           3
704
705enum iwl4965_false_alarm_state {
706 IWL_FA_TOO_MANY = 0,
707 IWL_FA_TOO_FEW = 1,
708 IWL_FA_GOOD_RANGE = 2,
709};
710
711enum iwl4965_chain_noise_state {
712 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
713 IWL_CHAIN_NOISE_ACCUMULATE,
714 IWL_CHAIN_NOISE_CALIBRATED,
715 IWL_CHAIN_NOISE_DONE,
716};
717
718enum iwl4965_calib_enabled_state {
719 IWL_CALIB_DISABLED = 0, /* must be 0 */
720 IWL_CALIB_ENABLED = 1,
721};
722
723/*
724 * enum iwl_calib
725 * defines the order in which results of initial calibrations
726 * should be sent to the runtime uCode
727 */
728enum iwl_calib {
729 IWL_CALIB_MAX,
730};
731
732/* Opaque calibration results */
733struct iwl_calib_result {
734 void *buf;
735 size_t buf_len;
736};
737
738enum ucode_type {
739 UCODE_NONE = 0,
740 UCODE_INIT,
741 UCODE_RT
742};
743
744/* Sensitivity calib data */
745struct iwl_sensitivity_data {
746 u32 auto_corr_ofdm;
747 u32 auto_corr_ofdm_mrc;
748 u32 auto_corr_ofdm_x1;
749 u32 auto_corr_ofdm_mrc_x1;
750 u32 auto_corr_cck;
751 u32 auto_corr_cck_mrc;
752
753 u32 last_bad_plcp_cnt_ofdm;
754 u32 last_fa_cnt_ofdm;
755 u32 last_bad_plcp_cnt_cck;
756 u32 last_fa_cnt_cck;
757
758 u32 nrg_curr_state;
759 u32 nrg_prev_state;
760 u32 nrg_value[10];
761 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
762 u32 nrg_silence_ref;
763 u32 nrg_energy_idx;
764 u32 nrg_silence_idx;
765 u32 nrg_th_cck;
766 s32 nrg_auto_corr_silence_diff;
767 u32 num_in_cck_no_fa;
768 u32 nrg_th_ofdm;
769
770 u16 barker_corr_th_min;
771 u16 barker_corr_th_min_mrc;
772 u16 nrg_th_cca;
773};
774
775/* Chain noise (differential Rx gain) calib data */
776struct iwl_chain_noise_data {
777 u32 active_chains;
778 u32 chain_noise_a;
779 u32 chain_noise_b;
780 u32 chain_noise_c;
781 u32 chain_signal_a;
782 u32 chain_signal_b;
783 u32 chain_signal_c;
784 u16 beacon_count;
785 u8 disconn_array[NUM_RX_CHAINS];
786 u8 delta_gain_code[NUM_RX_CHAINS];
787 u8 radio_write;
788 u8 state;
789};
790
791#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
792#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
793
794#define IWL_TRAFFIC_ENTRIES (256)
795#define IWL_TRAFFIC_ENTRY_SIZE (64)
796
797enum {
798 MEASUREMENT_READY = (1 << 0),
799 MEASUREMENT_ACTIVE = (1 << 1),
800};
801
802/* interrupt statistics */
803struct isr_statistics {
804 u32 hw;
805 u32 sw;
806 u32 err_code;
807 u32 sch;
808 u32 alive;
809 u32 rfkill;
810 u32 ctkill;
811 u32 wakeup;
812 u32 rx;
813 u32 rx_handlers[REPLY_MAX];
814 u32 tx;
815 u32 unhandled;
816};
817
818/* management statistics */
819enum iwl_mgmt_stats {
820 MANAGEMENT_ASSOC_REQ = 0,
821 MANAGEMENT_ASSOC_RESP,
822 MANAGEMENT_REASSOC_REQ,
823 MANAGEMENT_REASSOC_RESP,
824 MANAGEMENT_PROBE_REQ,
825 MANAGEMENT_PROBE_RESP,
826 MANAGEMENT_BEACON,
827 MANAGEMENT_ATIM,
828 MANAGEMENT_DISASSOC,
829 MANAGEMENT_AUTH,
830 MANAGEMENT_DEAUTH,
831 MANAGEMENT_ACTION,
832 MANAGEMENT_MAX,
833};
834/* control statistics */
835enum iwl_ctrl_stats {
836 CONTROL_BACK_REQ = 0,
837 CONTROL_BACK,
838 CONTROL_PSPOLL,
839 CONTROL_RTS,
840 CONTROL_CTS,
841 CONTROL_ACK,
842 CONTROL_CFEND,
843 CONTROL_CFENDACK,
844 CONTROL_MAX,
845};
846
847struct traffic_stats {
848#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
849 u32 mgmt[MANAGEMENT_MAX];
850 u32 ctrl[CONTROL_MAX];
851 u32 data_cnt;
852 u64 data_bytes;
853#endif
854};
855
856/*
857 * iwl_switch_rxon: "channel switch" structure
858 *
859 * @ switch_in_progress: channel switch in progress
860 * @ channel: new channel
861 */
862struct iwl_switch_rxon {
863 bool switch_in_progress;
864 __le16 channel;
865};
866
867/*
868 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
869 * to perform continuous uCode event logging operation if enabled
870 */
871#define UCODE_TRACE_PERIOD (100)
872
873/*
874 * iwl_event_log: current uCode event log position
875 *
876 * @ucode_trace: enable/disable ucode continuous trace timer
877 * @num_wraps: how many times the event buffer wraps
878 * @next_entry: the entry just before the next one that uCode would fill
879 * @non_wraps_count: counter for no wrap detected when dump ucode events
880 * @wraps_once_count: counter for wrap once detected when dump ucode events
881 * @wraps_more_count: counter for wrap more than once detected
882 * when dump ucode events
883 */
884struct iwl_event_log {
885 bool ucode_trace;
886 u32 num_wraps;
887 u32 next_entry;
888 int non_wraps_count;
889 int wraps_once_count;
890 int wraps_more_count;
891};
892
893/*
894 * host interrupt timeout value
895 * used with setting interrupt coalescing timer
896 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
897 *
898 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
899 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
900 */
901#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
902#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
903#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
904#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
905#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
906#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
907
908/*
909 * This is the threshold value of plcp error rate per 100mSecs. It is
910 * used to set and check for the validity of plcp_delta.
911 */
912#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
913#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
914#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
915#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
916#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
917#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
918
919#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
920#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
921
922/* TX queue watchdog timeouts in mSecs */
923#define IWL_DEF_WD_TIMEOUT (2000)
924#define IWL_LONG_WD_TIMEOUT (10000)
925#define IWL_MAX_WD_TIMEOUT (120000)
926
927enum iwl_reset {
928 IWL_RF_RESET = 0,
929 IWL_FW_RESET,
930 IWL_MAX_FORCE_RESET,
931};
932
/*
 * Bookkeeping for forced resets; iwl_priv keeps one instance per
 * enum iwl_reset method in its force_reset[] array.
 */
933struct iwl_force_reset {
	/* total reset requests seen */
934	int reset_request_count;
	/* requests that were actually carried out */
935	int reset_success_count;
	/* requests that were rejected */
936	int reset_reject_count;
	/* presumably the minimum interval between resets, in jiffies
	 * (cf. IWL_DELAY_NEXT_FORCE_* above) -- TODO confirm */
937	unsigned long reset_duration;
	/* jiffies timestamp of the last performed forced reset */
938	unsigned long last_force_reset_jiffies;
939};
940
941/* extend beacon time format bit shifting */
942/*
943 * for _3945 devices
944 * bits 31:24 - extended
945 * bits 23:0 - interval
946 */
947#define IWL3945_EXT_BEACON_TIME_POS 24
948/*
949 * for _4965 devices
950 * bits 31:22 - extended
951 * bits 21:0 - interval
952 */
953#define IWL4965_EXT_BEACON_TIME_POS 22
954
955enum iwl_rxon_context_id {
956 IWL_RXON_CTX_BSS,
957
958 NUM_IWL_RXON_CTX
959};
960
961struct iwl_rxon_context {
962 struct ieee80211_vif *vif;
963
964 const u8 *ac_to_fifo;
965 const u8 *ac_to_queue;
966 u8 mcast_queue;
967
968 /*
969 * We could use the vif to indicate active, but we
970 * also need it to be active during disabling when
971 * we already removed the vif for type setting.
972 */
973 bool always_active, is_active;
974
975 bool ht_need_multiple_chains;
976
977 enum iwl_rxon_context_id ctxid;
978
979 u32 interface_modes, exclusive_interface_modes;
980 u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
981
982 /*
983 * We declare this const so it can only be
984 * changed via explicit cast within the
985 * routines that actually update the physical
986 * hardware.
987 */
988 const struct iwl_legacy_rxon_cmd active;
989 struct iwl_legacy_rxon_cmd staging;
990
991 struct iwl_rxon_time_cmd timing;
992
993 struct iwl_qos_info qos_data;
994
995 u8 bcast_sta_id, ap_sta_id;
996
997 u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
998 u8 qos_cmd;
999 u8 wep_key_cmd;
1000
1001 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1002 u8 key_mapping_keys;
1003
1004 __le32 station_flags;
1005
1006 struct {
1007 bool non_gf_sta_present;
1008 u8 protection;
1009 bool enabled, is_40mhz;
1010 u8 extension_chan_offset;
1011 } ht;
1012};
1013
1014struct iwl_priv {
1015
1016 /* ieee device used by generic ieee processing code */
1017 struct ieee80211_hw *hw;
1018 struct ieee80211_channel *ieee_channels;
1019 struct ieee80211_rate *ieee_rates;
1020 struct iwl_cfg *cfg;
1021
1022 /* temporary frame storage list */
1023 struct list_head free_frames;
1024 int frames_count;
1025
1026 enum ieee80211_band band;
1027 int alloc_rxb_page;
1028
1029 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
1030 struct iwl_rx_mem_buffer *rxb);
1031
1032 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1033
1034 /* spectrum measurement report caching */
1035 struct iwl_spectrum_notification measure_report;
1036 u8 measurement_status;
1037
1038 /* ucode beacon time */
1039 u32 ucode_beacon_time;
1040 int missed_beacon_threshold;
1041
1042 /* track IBSS manager (last beacon) status */
1043 u32 ibss_manager;
1044
1045 /* storing the jiffies when the plcp error rate is received */
1046 unsigned long plcp_jiffies;
1047
1048 /* force reset */
1049 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1050
1051 /* we allocate array of iwl_channel_info for NIC's valid channels.
1052 * Access via channel # using indirect index array */
1053 struct iwl_channel_info *channel_info; /* channel info array */
1054 u8 channel_count; /* # of channels */
1055
1056 /* thermal calibration */
1057 s32 temperature; /* degrees Kelvin */
1058 s32 last_temperature;
1059
1060 /* init calibration results */
1061 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1062
1063 /* Scan related variables */
1064 unsigned long scan_start;
1065 unsigned long scan_start_tsf;
1066 void *scan_cmd;
1067 enum ieee80211_band scan_band;
1068 struct cfg80211_scan_request *scan_request;
1069 struct ieee80211_vif *scan_vif;
1070 bool is_internal_short_scan;
1071 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1072 u8 mgmt_tx_ant;
1073
1074 /* spinlock */
1075 spinlock_t lock; /* protect general shared data */
1076 spinlock_t hcmd_lock; /* protect hcmd */
1077 spinlock_t reg_lock; /* protect hw register access */
1078 struct mutex mutex;
1079 struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
1080
1081 /* basic pci-network driver stuff */
1082 struct pci_dev *pci_dev;
1083
1084 /* pci hardware address support */
1085 void __iomem *hw_base;
1086 u32 hw_rev;
1087 u32 hw_wa_rev;
1088 u8 rev_id;
1089
1090 /* microcode/device supports multiple contexts */
1091 u8 valid_contexts;
1092
1093 /* command queue number */
1094 u8 cmd_queue;
1095
1096 /* max number of station keys */
1097 u8 sta_key_max_num;
1098
1099 /* EEPROM MAC addresses */
1100 struct mac_address addresses[1];
1101
1102 /* uCode images, save to reload in case of failure */
1103 int fw_index; /* firmware we're trying to load */
1104 u32 ucode_ver; /* version of ucode, copy of
1105 iwl_ucode.ver */
1106 struct fw_desc ucode_code; /* runtime inst */
1107 struct fw_desc ucode_data; /* runtime data original */
1108 struct fw_desc ucode_data_backup; /* runtime data save/restore */
1109 struct fw_desc ucode_init; /* initialization inst */
1110 struct fw_desc ucode_init_data; /* initialization data */
1111 struct fw_desc ucode_boot; /* bootstrap inst */
1112 enum ucode_type ucode_type;
1113 u8 ucode_write_complete; /* the image write is complete */
1114 char firmware_name[25];
1115
1116 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1117
1118 struct iwl_switch_rxon switch_rxon;
1119
1120 /* 1st responses from initialize and runtime uCode images.
1121 * _4965's initialize alive response contains some calibration data. */
1122 struct iwl_init_alive_resp card_alive_init;
1123 struct iwl_alive_resp card_alive;
1124
1125 u16 active_rate;
1126
1127 u8 start_calib;
1128 struct iwl_sensitivity_data sensitivity_data;
1129 struct iwl_chain_noise_data chain_noise_data;
1130 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1131
1132 struct iwl_ht_config current_ht_config;
1133
1134 /* Rate scaling data */
1135 u8 retry_rate;
1136
1137 wait_queue_head_t wait_command_queue;
1138
1139 int activity_timer_active;
1140
1141 /* Rx and Tx DMA processing queues */
1142 struct iwl_rx_queue rxq;
1143 struct iwl_tx_queue *txq;
1144 unsigned long txq_ctx_active_msk;
1145 struct iwl_dma_ptr kw; /* keep warm address */
1146 struct iwl_dma_ptr scd_bc_tbls;
1147
1148 u32 scd_base_addr; /* scheduler sram base address */
1149
1150 unsigned long status;
1151
1152 /* counts mgmt, ctl, and data packets */
1153 struct traffic_stats tx_stats;
1154 struct traffic_stats rx_stats;
1155
1156 /* counts interrupts */
1157 struct isr_statistics isr_stats;
1158
1159 struct iwl_power_mgr power_data;
1160
1161 /* context information */
1162 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1163
1164 /* station table variables */
1165
1166 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1167 spinlock_t sta_lock;
1168 int num_stations;
1169 struct iwl_station_entry stations[IWL_STATION_COUNT];
1170 unsigned long ucode_key_table;
1171
1172 /* queue refcounts */
1173#define IWL_MAX_HW_QUEUES 32
1174 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1175 /* for each AC */
1176 atomic_t queue_stop_count[4];
1177
1178 /* Indication if ieee80211_ops->open has been called */
1179 u8 is_open;
1180
1181 u8 mac80211_registered;
1182
1183 /* eeprom -- this is in the card's little endian byte order */
1184 u8 *eeprom;
1185 struct iwl_eeprom_calib_info *calib_info;
1186
1187 enum nl80211_iftype iw_mode;
1188
1189 /* Last Rx'd beacon timestamp */
1190 u64 timestamp;
1191
1192 union {
1193#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1194 struct {
1195 void *shared_virt;
1196 dma_addr_t shared_phys;
1197
1198 struct delayed_work thermal_periodic;
1199 struct delayed_work rfkill_poll;
1200
1201 struct iwl3945_notif_statistics statistics;
1202#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1203 struct iwl3945_notif_statistics accum_statistics;
1204 struct iwl3945_notif_statistics delta_statistics;
1205 struct iwl3945_notif_statistics max_delta;
1206#endif
1207
1208 u32 sta_supp_rates;
1209 int last_rx_rssi; /* From Rx packet statistics */
1210
1211 /* Rx'd packet timing information */
1212 u32 last_beacon_time;
1213 u64 last_tsf;
1214
1215 /*
1216 * each calibration channel group in the
1217 * EEPROM has a derived clip setting for
1218 * each rate.
1219 */
1220 const struct iwl3945_clip_group clip_groups[5];
1221
1222 } _3945;
1223#endif
1224#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
1225 struct {
1226 /*
1227 * reporting the number of tids has AGG on. 0 means
1228 * no AGGREGATION
1229 */
1230 u8 agg_tids_count;
1231
1232 struct iwl_rx_phy_res last_phy_res;
1233 bool last_phy_res_valid;
1234
1235 struct completion firmware_loading_complete;
1236
1237			/*
1238			 * Chain noise reset and gain are the two extra
1239			 * calibration commands that follow the standard
1240			 * set of PHY calibration commands.
1241			 */
1242 u8 phy_calib_chain_noise_reset_cmd;
1243 u8 phy_calib_chain_noise_gain_cmd;
1244
1245 struct iwl_notif_statistics statistics;
1246#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1247 struct iwl_notif_statistics accum_statistics;
1248 struct iwl_notif_statistics delta_statistics;
1249 struct iwl_notif_statistics max_delta;
1250#endif
1251
1252 } _4965;
1253#endif
1254 };
1255
1256 struct iwl_hw_params hw_params;
1257
1258 u32 inta_mask;
1259
1260 struct workqueue_struct *workqueue;
1261
1262 struct work_struct restart;
1263 struct work_struct scan_completed;
1264 struct work_struct rx_replenish;
1265 struct work_struct abort_scan;
1266
1267 struct iwl_rxon_context *beacon_ctx;
1268 struct sk_buff *beacon_skb;
1269
1270 struct work_struct start_internal_scan;
1271 struct work_struct tx_flush;
1272
1273 struct tasklet_struct irq_tasklet;
1274
1275 struct delayed_work init_alive_start;
1276 struct delayed_work alive_start;
1277 struct delayed_work scan_check;
1278
1279 /* TX Power */
1280 s8 tx_power_user_lmt;
1281 s8 tx_power_device_lmt;
1282 s8 tx_power_next;
1283
1284
1285#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1286 /* debugging info */
1287 u32 debug_level; /* per device debugging will override global
1288 iwlegacy_debug_level if set */
1289#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1290#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1291 /* debugfs */
1292 u16 tx_traffic_idx;
1293 u16 rx_traffic_idx;
1294 u8 *tx_traffic;
1295 u8 *rx_traffic;
1296 struct dentry *debugfs_dir;
1297 u32 dbgfs_sram_offset, dbgfs_sram_len;
1298 bool disable_ht40;
1299#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
1300
1301 struct work_struct txpower_work;
1302 u32 disable_sens_cal;
1303 u32 disable_chain_noise_cal;
1304 u32 disable_tx_power_cal;
1305 struct work_struct run_time_calib_work;
1306 struct timer_list statistics_periodic;
1307 struct timer_list ucode_trace;
1308 struct timer_list watchdog;
1309 bool hw_ready;
1310
1311 struct iwl_event_log event_log;
1312
1313 struct led_classdev led;
1314 unsigned long blink_on, blink_off;
1315 bool led_registered;
1316}; /*iwl_priv */
1317
/* Mark TX queue @txq_id as active in the device's TX-queue context bitmask. */
1318static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1319{
1320	set_bit(txq_id, &priv->txq_ctx_active_msk);
1321}
1322
/* Clear TX queue @txq_id from the device's TX-queue context bitmask. */
1323static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1324{
1325	clear_bit(txq_id, &priv->txq_ctx_active_msk);
1326}
1327
1328#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1329/*
1330 * iwl_legacy_get_debug_level: Return active debug level for device
1331 *
1332 * Using sysfs it is possible to set per device debug level. This debug
1333 * level will be used if set, otherwise the global debug level which can be
1334 * set via module parameter is used.
1335 */
1336static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1337{
1338 if (priv->debug_level)
1339 return priv->debug_level;
1340 else
1341 return iwlegacy_debug_level;
1342}
1343#else
1344static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1345{
1346 return iwlegacy_debug_level;
1347}
1348#endif
1349
1350
1351static inline struct ieee80211_hdr *
1352iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
1353 int txq_id, int idx)
1354{
1355 if (priv->txq[txq_id].txb[idx].skb)
1356 return (struct ieee80211_hdr *)priv->txq[txq_id].
1357 txb[idx].skb->data;
1358 return NULL;
1359}
1360
1361static inline struct iwl_rxon_context *
1362iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1363{
1364 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1365
1366 return vif_priv->ctx;
1367}
1368
1369#define for_each_context(priv, ctx) \
1370 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
1371 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1372 if (priv->valid_contexts & BIT(ctx->ctxid))
1373
1374static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
1375 enum iwl_rxon_context_id ctxid)
1376{
1377 return (priv->contexts[ctxid].active.filter_flags &
1378 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1379}
1380
/* Non-zero when any RXON context is associated; only the BSS context exists
 * in this legacy driver (see enum iwl_rxon_context_id). */
1381static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
1382{
1383	return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
1384}
1385
1386static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
1387{
1388 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1389}
1390
1391static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
1392{
1393 if (ch_info == NULL)
1394 return 0;
1395 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1396}
1397
1398static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
1399{
1400 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1401}
1402
1403static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
1404{
1405 return ch_info->band == IEEE80211_BAND_5GHZ;
1406}
1407
1408static inline int
1409iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1410{
1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1412}
1413
/*
 * Free one RX buffer page (a compound page of order
 * hw_params.rx_page_order) and decrement the device's
 * allocated-RX-page counter.
 */
1414static inline void
1415__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
1416{
1417	__free_pages(page, priv->hw_params.rx_page_order);
1418	priv->alloc_rxb_page--;
1419}
1420
/*
 * Like __iwl_legacy_free_pages() but takes the page as a kernel virtual
 * address (unsigned long) instead of a struct page pointer.
 */
1421static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
1422{
1423	free_pages(page, priv->hw_params.rx_page_order);
1424	priv->alloc_rxb_page--;
1425}
1426#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644
index 000000000000..080b852b33bd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -0,0 +1,45 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-dev.h"
32
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
45#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644
index 000000000000..9612aa0f6ec4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
@@ -0,0 +1,270 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_LEGACY_DEVICE_TRACE
29
30#include <linux/tracepoint.h>
31
32#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \
35static inline void trace_ ## name(proto) {}
36#endif
37
38
39#define PRIV_ENTRY __field(struct iwl_priv *, priv)
40#define PRIV_ASSIGN (__entry->priv = priv)
41
42#undef TRACE_SYSTEM
43#define TRACE_SYSTEM iwlwifi_legacy_io
44
45TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
46 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
47 TP_ARGS(priv, offs, val),
48 TP_STRUCT__entry(
49 PRIV_ENTRY
50 __field(u32, offs)
51 __field(u32, val)
52 ),
53 TP_fast_assign(
54 PRIV_ASSIGN;
55 __entry->offs = offs;
56 __entry->val = val;
57 ),
58 TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
59 __entry->offs, __entry->val)
60);
61
62TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
63 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
64 TP_ARGS(priv, offs, val),
65 TP_STRUCT__entry(
66 PRIV_ENTRY
67 __field(u32, offs)
68 __field(u8, val)
69 ),
70 TP_fast_assign(
71 PRIV_ASSIGN;
72 __entry->offs = offs;
73 __entry->val = val;
74 ),
75 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
76 __entry->offs, __entry->val)
77);
78
79TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
80 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
81 TP_ARGS(priv, offs, val),
82 TP_STRUCT__entry(
83 PRIV_ENTRY
84 __field(u32, offs)
85 __field(u32, val)
86 ),
87 TP_fast_assign(
88 PRIV_ASSIGN;
89 __entry->offs = offs;
90 __entry->val = val;
91 ),
92 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
93 __entry->offs, __entry->val)
94);
95
96#undef TRACE_SYSTEM
97#define TRACE_SYSTEM iwlwifi_legacy_ucode
98
99TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
100 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
101 TP_ARGS(priv, time, data, ev),
102 TP_STRUCT__entry(
103 PRIV_ENTRY
104
105 __field(u32, time)
106 __field(u32, data)
107 __field(u32, ev)
108 ),
109 TP_fast_assign(
110 PRIV_ASSIGN;
111 __entry->time = time;
112 __entry->data = data;
113 __entry->ev = ev;
114 ),
115 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
116 __entry->priv, __entry->time, __entry->data, __entry->ev)
117);
118
119TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
120 TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
121 TP_ARGS(priv, wraps, n_entry, p_entry),
122 TP_STRUCT__entry(
123 PRIV_ENTRY
124
125 __field(u32, wraps)
126 __field(u32, n_entry)
127 __field(u32, p_entry)
128 ),
129 TP_fast_assign(
130 PRIV_ASSIGN;
131 __entry->wraps = wraps;
132 __entry->n_entry = n_entry;
133 __entry->p_entry = p_entry;
134 ),
135 TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
136 __entry->priv, __entry->wraps, __entry->n_entry,
137 __entry->p_entry)
138);
139
140#undef TRACE_SYSTEM
141#define TRACE_SYSTEM iwlwifi
142
143TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
144 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
145 TP_ARGS(priv, hcmd, len, flags),
146 TP_STRUCT__entry(
147 PRIV_ENTRY
148 __dynamic_array(u8, hcmd, len)
149 __field(u32, flags)
150 ),
151 TP_fast_assign(
152 PRIV_ASSIGN;
153 memcpy(__get_dynamic_array(hcmd), hcmd, len);
154 __entry->flags = flags;
155 ),
156 TP_printk("[%p] hcmd %#.2x (%ssync)",
157 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
158 __entry->flags & CMD_ASYNC ? "a" : "")
159);
160
161TRACE_EVENT(iwlwifi_legacy_dev_rx,
162 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
163 TP_ARGS(priv, rxbuf, len),
164 TP_STRUCT__entry(
165 PRIV_ENTRY
166 __dynamic_array(u8, rxbuf, len)
167 ),
168 TP_fast_assign(
169 PRIV_ASSIGN;
170 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
171 ),
172 TP_printk("[%p] RX cmd %#.2x",
173 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
174);
175
176TRACE_EVENT(iwlwifi_legacy_dev_tx,
177 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
178 void *buf0, size_t buf0_len,
179 void *buf1, size_t buf1_len),
180 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
181 TP_STRUCT__entry(
182 PRIV_ENTRY
183
184 __field(size_t, framelen)
185 __dynamic_array(u8, tfd, tfdlen)
186
187 /*
188 * Do not insert between or below these items,
189 * we want to keep the frame together (except
190 * for the possible padding).
191 */
192 __dynamic_array(u8, buf0, buf0_len)
193 __dynamic_array(u8, buf1, buf1_len)
194 ),
195 TP_fast_assign(
196 PRIV_ASSIGN;
197 __entry->framelen = buf0_len + buf1_len;
198 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
199 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
200 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
201 ),
202 TP_printk("[%p] TX %.2x (%zu bytes)",
203 __entry->priv,
204 ((u8 *)__get_dynamic_array(buf0))[0],
205 __entry->framelen)
206);
207
208TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
209 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
210 u32 data1, u32 data2, u32 line, u32 blink1,
211 u32 blink2, u32 ilink1, u32 ilink2),
212 TP_ARGS(priv, desc, time, data1, data2, line,
213 blink1, blink2, ilink1, ilink2),
214 TP_STRUCT__entry(
215 PRIV_ENTRY
216 __field(u32, desc)
217 __field(u32, time)
218 __field(u32, data1)
219 __field(u32, data2)
220 __field(u32, line)
221 __field(u32, blink1)
222 __field(u32, blink2)
223 __field(u32, ilink1)
224 __field(u32, ilink2)
225 ),
226 TP_fast_assign(
227 PRIV_ASSIGN;
228 __entry->desc = desc;
229 __entry->time = time;
230 __entry->data1 = data1;
231 __entry->data2 = data2;
232 __entry->line = line;
233 __entry->blink1 = blink1;
234 __entry->blink2 = blink2;
235 __entry->ilink1 = ilink1;
236 __entry->ilink2 = ilink2;
237 ),
238 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
239 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
240 __entry->priv, __entry->desc, __entry->time, __entry->data1,
241 __entry->data2, __entry->line, __entry->blink1,
242 __entry->blink2, __entry->ilink1, __entry->ilink2)
243);
244
245TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
246 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
247 TP_ARGS(priv, time, data, ev),
248 TP_STRUCT__entry(
249 PRIV_ENTRY
250
251 __field(u32, time)
252 __field(u32, data)
253 __field(u32, ev)
254 ),
255 TP_fast_assign(
256 PRIV_ASSIGN;
257 __entry->time = time;
258 __entry->data = data;
259 __entry->ev = ev;
260 ),
261 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
262 __entry->priv, __entry->time, __entry->data, __entry->ev)
263);
264#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
265
266#undef TRACE_INCLUDE_PATH
267#define TRACE_INCLUDE_PATH .
268#undef TRACE_INCLUDE_FILE
269#define TRACE_INCLUDE_FILE iwl-devtrace
270#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644
index 000000000000..04c5648027df
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -0,0 +1,561 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwlegacy_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
/* 2.4 GHz */
/* Non-static: exported via iwl-eeprom.h for use by other driver files. */
const u8 iwlegacy_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 iwlegacy_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 iwlegacy_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 iwlegacy_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 iwlegacy_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

/* HT40 tables list the lower-half control channel of each 40 MHz pair
 * (see the EEPROM_4965_REGULATORY_BAND_*_HT40_CHANNELS comments). */
static const u8 iwlegacy_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 iwlegacy_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0;
149
150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) {
152 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
153 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
154 break;
155 default:
156 IWL_ERR(priv, "bad EEPROM signature,"
157 "EEPROM_GP=0x%08x\n", gp);
158 ret = -ENOENT;
159 break;
160 }
161 return ret;
162}
163
164const u8
165*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
166{
167 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
168 return &priv->eeprom[offset];
169}
170EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
171
172u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
173{
174 if (!priv->eeprom)
175 return 0;
176 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
177}
178EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
179
/**
 * iwl_legacy_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into priv->eeprom
 *
 * Allocates priv->eeprom, brings the device out of low power (apm init),
 * verifies the EEPROM signature, takes the driver/uCode access semaphore,
 * then reads the whole image one 16-bit word at a time through
 * CSR_EEPROM_REG. On any failure the image is freed again and an error
 * is returned; in all cases (after a successful allocation) the chip is
 * put back into low power via iwl_legacy_apm_stop().
 *
 * Returns 0 on success, -ENOMEM or -ENOENT on failure.
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int iwl_legacy_eeprom_init(struct iwl_priv *priv)
{
	__le16 *e;
	u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = priv->cfg->base_params->eeprom_size;
	IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
	priv->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!priv->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	/* view the byte buffer as an array of little-endian 16-bit words */
	e = (__le16 *)priv->eeprom;

	priv->cfg->ops->lib->apm_ops.init(priv);

	ret = iwl_legacy_eeprom_verify_signature(priv);
	if (ret < 0) {
		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
	if (ret < 0) {
		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		/* request the word: device takes byte address << 1 */
		_iwl_legacy_write32(priv, CSR_EEPROM_REG,
			    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		/* wait for the READ_VALID bit to confirm completion */
		ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
					  CSR_EEPROM_REG_READ_VALID_MSK,
					  CSR_EEPROM_REG_READ_VALID_MSK,
					  IWL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
							addr);
			/* ret < 0: fall through "done" to release
			 * the semaphore, then "err" frees the image */
			goto done;
		}
		/* the 16-bit data lives in the upper half of the register */
		r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
		       "EEPROM",
		       iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));

	ret = 0;
done:
	priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);

err:
	if (ret)
		iwl_legacy_eeprom_free(priv);
	/* Reset chip to save power until we load uCode during "up". */
	iwl_legacy_apm_stop(priv);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_eeprom_init);
259
260void iwl_legacy_eeprom_free(struct iwl_priv *priv)
261{
262 kfree(priv->eeprom);
263 priv->eeprom = NULL;
264}
265EXPORT_SYMBOL(iwl_legacy_eeprom_free);
266
267static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
268 int eep_band, int *eeprom_ch_count,
269 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const u8 **eeprom_ch_index)
271{
272 u32 offset = priv->cfg->ops->lib->
273 eeprom_ops.regulatory_bands[eep_band - 1];
274 switch (eep_band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
277 *eeprom_ch_info = (struct iwl_eeprom_channel *)
278 iwl_legacy_eeprom_query_addr(priv, offset);
279 *eeprom_ch_index = iwlegacy_eeprom_band_1;
280 break;
281 case 2: /* 4.9GHz band */
282 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
283 *eeprom_ch_info = (struct iwl_eeprom_channel *)
284 iwl_legacy_eeprom_query_addr(priv, offset);
285 *eeprom_ch_index = iwlegacy_eeprom_band_2;
286 break;
287 case 3: /* 5.2GHz band */
288 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
289 *eeprom_ch_info = (struct iwl_eeprom_channel *)
290 iwl_legacy_eeprom_query_addr(priv, offset);
291 *eeprom_ch_index = iwlegacy_eeprom_band_3;
292 break;
293 case 4: /* 5.5GHz band */
294 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
295 *eeprom_ch_info = (struct iwl_eeprom_channel *)
296 iwl_legacy_eeprom_query_addr(priv, offset);
297 *eeprom_ch_index = iwlegacy_eeprom_band_4;
298 break;
299 case 5: /* 5.7GHz band */
300 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
301 *eeprom_ch_info = (struct iwl_eeprom_channel *)
302 iwl_legacy_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwlegacy_eeprom_band_5;
304 break;
305 case 6: /* 2.4GHz ht40 channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
307 *eeprom_ch_info = (struct iwl_eeprom_channel *)
308 iwl_legacy_eeprom_query_addr(priv, offset);
309 *eeprom_ch_index = iwlegacy_eeprom_band_6;
310 break;
311 case 7: /* 5 GHz ht40 channels */
312 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
313 *eeprom_ch_info = (struct iwl_eeprom_channel *)
314 iwl_legacy_eeprom_query_addr(priv, offset);
315 *eeprom_ch_index = iwlegacy_eeprom_band_7;
316 break;
317 default:
318 BUG();
319 return;
320 }
321}
322
/* Stringify an EEPROM_CHANNEL_* flag name (plus trailing space) if it is
 * set in eeprom_ch->flags; expands to "" otherwise. Used only in the
 * debug print below. */
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
/**
 * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
 *
 * Looks up the driver's channel_info entry for (@band, @channel), copies
 * the HT40 regulatory data from @eeprom_ch into it, and — when the EEPROM
 * marks the channel VALID — clears @clear_ht40_extension_channel from
 * the entry's ht40_extension_channel bitmask (i.e. allows that extension
 * direction). Returns -1 if the channel is unknown/invalid, 0 otherwise.
 *
 * Does not set up a command, or touch hardware.
 */
static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
			      enum ieee80211_band band, u16 channel,
			      const struct iwl_eeprom_channel *eeprom_ch,
			      u8 clear_ht40_extension_channel)
{
	struct iwl_channel_info *ch_info;

	/* cast away const: we own and update this table entry */
	ch_info = (struct iwl_channel_info *)
			iwl_legacy_get_channel_info(priv, band, channel);

	if (!iwl_legacy_is_channel_valid(ch_info))
		return -1;

	IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
			" Ad-Hoc %ssupported\n",
			ch_info->channel,
			iwl_legacy_is_channel_a_band(ch_info) ?
			"5.2" : "2.4",
			CHECK_AND_PRINT(IBSS),
			CHECK_AND_PRINT(ACTIVE),
			CHECK_AND_PRINT(RADAR),
			CHECK_AND_PRINT(WIDE),
			CHECK_AND_PRINT(DFS),
			eeprom_ch->flags,
			eeprom_ch->max_power_avg,
			((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
			 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
			"" : "not ");

	/* record the HT40 regulatory data alongside the 20 MHz data */
	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
					~clear_ht40_extension_channel;

	return 0;
}
368
369#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
370 ? # x " " : "")
371
372/**
373 * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
374 */
375int iwl_legacy_init_channel_map(struct iwl_priv *priv)
376{
377 int eeprom_ch_count = 0;
378 const u8 *eeprom_ch_index = NULL;
379 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
380 int band, ch;
381 struct iwl_channel_info *ch_info;
382
383 if (priv->channel_count) {
384 IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
385 return 0;
386 }
387
388 IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
389
390 priv->channel_count =
391 ARRAY_SIZE(iwlegacy_eeprom_band_1) +
392 ARRAY_SIZE(iwlegacy_eeprom_band_2) +
393 ARRAY_SIZE(iwlegacy_eeprom_band_3) +
394 ARRAY_SIZE(iwlegacy_eeprom_band_4) +
395 ARRAY_SIZE(iwlegacy_eeprom_band_5);
396
397 IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
398 priv->channel_count);
399
400 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
401 priv->channel_count, GFP_KERNEL);
402 if (!priv->channel_info) {
403 IWL_ERR(priv, "Could not allocate channel_info\n");
404 priv->channel_count = 0;
405 return -ENOMEM;
406 }
407
408 ch_info = priv->channel_info;
409
410 /* Loop through the 5 EEPROM bands adding them in order to the
411 * channel map we maintain (that contains additional information than
412 * what just in the EEPROM) */
413 for (band = 1; band <= 5; band++) {
414
415 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
416 &eeprom_ch_info, &eeprom_ch_index);
417
418 /* Loop through each band adding each of the channels */
419 for (ch = 0; ch < eeprom_ch_count; ch++) {
420 ch_info->channel = eeprom_ch_index[ch];
421 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
422 IEEE80211_BAND_5GHZ;
423
424 /* permanently store EEPROM's channel regulatory flags
425 * and max power in channel info database. */
426 ch_info->eeprom = eeprom_ch_info[ch];
427
428 /* Copy the run-time flags so they are there even on
429 * invalid channels */
430 ch_info->flags = eeprom_ch_info[ch].flags;
431 /* First write that ht40 is not enabled, and then enable
432 * one by one */
433 ch_info->ht40_extension_channel =
434 IEEE80211_CHAN_NO_HT40;
435
436 if (!(iwl_legacy_is_channel_valid(ch_info))) {
437 IWL_DEBUG_EEPROM(priv,
438 "Ch. %d Flags %x [%sGHz] - "
439 "No traffic\n",
440 ch_info->channel,
441 ch_info->flags,
442 iwl_legacy_is_channel_a_band(ch_info) ?
443 "5.2" : "2.4");
444 ch_info++;
445 continue;
446 }
447
448 /* Initialize regulatory-based run-time data */
449 ch_info->max_power_avg = ch_info->curr_txpow =
450 eeprom_ch_info[ch].max_power_avg;
451 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
452 ch_info->min_power = 0;
453
454 IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
455 "%s%s%s%s%s%s(0x%02x %ddBm):"
456 " Ad-Hoc %ssupported\n",
457 ch_info->channel,
458 iwl_legacy_is_channel_a_band(ch_info) ?
459 "5.2" : "2.4",
460 CHECK_AND_PRINT_I(VALID),
461 CHECK_AND_PRINT_I(IBSS),
462 CHECK_AND_PRINT_I(ACTIVE),
463 CHECK_AND_PRINT_I(RADAR),
464 CHECK_AND_PRINT_I(WIDE),
465 CHECK_AND_PRINT_I(DFS),
466 eeprom_ch_info[ch].flags,
467 eeprom_ch_info[ch].max_power_avg,
468 ((eeprom_ch_info[ch].
469 flags & EEPROM_CHANNEL_IBSS)
470 && !(eeprom_ch_info[ch].
471 flags & EEPROM_CHANNEL_RADAR))
472 ? "" : "not ");
473
474 /* Set the tx_power_user_lmt to the highest power
475 * supported by any channel */
476 if (eeprom_ch_info[ch].max_power_avg >
477 priv->tx_power_user_lmt)
478 priv->tx_power_user_lmt =
479 eeprom_ch_info[ch].max_power_avg;
480
481 ch_info++;
482 }
483 }
484
485 /* Check if we do have HT40 channels */
486 if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
487 EEPROM_REGULATORY_BAND_NO_HT40 &&
488 priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
489 EEPROM_REGULATORY_BAND_NO_HT40)
490 return 0;
491
492 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
493 for (band = 6; band <= 7; band++) {
494 enum ieee80211_band ieeeband;
495
496 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
497 &eeprom_ch_info, &eeprom_ch_index);
498
499 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
500 ieeeband =
501 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
502
503 /* Loop through each band adding each of the channels */
504 for (ch = 0; ch < eeprom_ch_count; ch++) {
505 /* Set up driver's info for lower half */
506 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
507 eeprom_ch_index[ch],
508 &eeprom_ch_info[ch],
509 IEEE80211_CHAN_NO_HT40PLUS);
510
511 /* Set up driver's info for upper half */
512 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
513 eeprom_ch_index[ch] + 4,
514 &eeprom_ch_info[ch],
515 IEEE80211_CHAN_NO_HT40MINUS);
516 }
517 }
518
519 return 0;
520}
521EXPORT_SYMBOL(iwl_legacy_init_channel_map);
522
523/*
524 * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
525 */
526void iwl_legacy_free_channel_map(struct iwl_priv *priv)
527{
528 kfree(priv->channel_info);
529 priv->channel_count = 0;
530}
531EXPORT_SYMBOL(iwl_legacy_free_channel_map);
532
533/**
534 * iwl_legacy_get_channel_info - Find driver's private channel info
535 *
536 * Based on band and channel number.
537 */
538const struct
539iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
540 enum ieee80211_band band, u16 channel)
541{
542 int i;
543
544 switch (band) {
545 case IEEE80211_BAND_5GHZ:
546 for (i = 14; i < priv->channel_count; i++) {
547 if (priv->channel_info[i].channel == channel)
548 return &priv->channel_info[i];
549 }
550 break;
551 case IEEE80211_BAND_2GHZ:
552 if (channel >= 1 && channel <= 14)
553 return &priv->channel_info[channel - 1];
554 break;
555 default:
556 BUG();
557 }
558
559 return NULL;
560}
561EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644
index 000000000000..c59c81002022
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
@@ -0,0 +1,344 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_eeprom_h__
64#define __iwl_legacy_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
/* Regulatory usage flag bits, one flags byte per channel (see the
 * struct iwl_eeprom_channel comment and the note block above). */
enum {
	EEPROM_CHANNEL_VALID = (1 << 0),	/* usable for this SKU/geo */
	EEPROM_CHANNEL_IBSS = (1 << 1),		/* usable as an IBSS channel */
	/* Bit 2 Reserved */
	EEPROM_CHANNEL_ACTIVE = (1 << 3),	/* active scanning allowed */
	EEPROM_CHANNEL_RADAR = (1 << 4),	/* radar detection required */
	EEPROM_CHANNEL_WIDE = (1 << 5),		/* 20 MHz channel okay */
	/* Bit 6 Reserved (was Narrow Channel) */
	EEPROM_CHANNEL_DFS = (1 << 7),	/* dynamic freq selection candidate */
};
111
112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
/* Per-channel regulatory entry; __packed because it mirrors the raw
 * on-device EEPROM layout (one flags byte followed by one power byte). */
struct iwl_eeprom_channel {
	u8 flags;		/* EEPROM_CHANNEL_* flags copied from EEPROM */
	s8 max_power_avg;	/* max power (dBm) on this chnl, limit 31 */
} __packed;
123
124/* 3945 Specific */
125#define EEPROM_3945_EEPROM_VERSION (0x2f)
126
127/* 4965 has two radio transmitters (and 3 radio receivers) */
128#define EEPROM_TX_POWER_TX_CHAINS (2)
129
130/* 4965 has room for up to 8 sets of txpower calibration data */
131#define EEPROM_TX_POWER_BANDS (8)
132
133/* 4965 factory calibration measures txpower gain settings for
134 * each of 3 target output levels */
135#define EEPROM_TX_POWER_MEASUREMENTS (3)
136
137/* 4965 Specific */
138/* 4965 driver does not work with txpower calibration version < 5 */
139#define EEPROM_4965_TX_POWER_VERSION (5)
140#define EEPROM_4965_EEPROM_VERSION (0x2f)
141#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
142#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
143#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
144#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
145
146/* 2.4 GHz */
147extern const u8 iwlegacy_eeprom_band_1[14];
148
149/*
150 * factory calibration data for one txpower level, on one channel,
151 * measured on one of the 2 tx chains (radio transmitter and associated
152 * antenna). EEPROM contains:
153 *
154 * 1) Temperature (degrees Celsius) of device when measurement was made.
155 *
156 * 2) Gain table index used to achieve the target measurement power.
157 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
158 *
159 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
160 *
161 * 4) RF power amplifier detector level measurement (not used).
162 */
/* One factory txpower calibration measurement (see block comment above);
 * __packed to match the raw EEPROM layout. */
struct iwl_eeprom_calib_measure {
	u8 temperature;		/* Device temperature (Celsius) */
	u8 gain_idx;		/* Index into gain table */
	u8 actual_pow;		/* Measured RF output power, half-dBm */
	s8 pa_det;		/* Power amp detector level (not used) */
} __packed;
169
170
171/*
172 * measurement set for one channel. EEPROM contains:
173 *
174 * 1) Channel number measured
175 *
176 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
177 * (a.k.a. "tx chains") (6 measurements altogether)
178 */
/* Calibration measurement set for one channel: 3 power levels for each
 * of the 2 tx chains (6 measurements total, see block comment above). */
struct iwl_eeprom_calib_ch_info {
	u8 ch_num;	/* channel number that was measured */
	struct iwl_eeprom_calib_measure
		measurements[EEPROM_TX_POWER_TX_CHAINS]
			[EEPROM_TX_POWER_MEASUREMENTS];
} __packed;
185
186/*
187 * txpower subband info.
188 *
189 * For each frequency subband, EEPROM contains the following:
190 *
191 * 1) First and last channels within range of the subband. "0" values
192 * indicate that this sample set is not being used.
193 *
194 * 2) Sample measurement sets for 2 channels close to the range endpoints.
195 */
/* Txpower subband: channel range plus sample measurement sets taken on
 * two channels close to the range endpoints (see block comment above). */
struct iwl_eeprom_calib_subband_info {
	u8 ch_from;	/* channel number of lowest channel in subband */
	u8 ch_to;	/* channel number of highest channel in subband */
	struct iwl_eeprom_calib_ch_info ch1;	/* samples near ch_from */
	struct iwl_eeprom_calib_ch_info ch2;	/* samples near ch_to */
} __packed;
202
203
204/*
205 * txpower calibration info. EEPROM contains:
206 *
207 * 1) Factory-measured saturation power levels (maximum levels at which
208 * tx power amplifier can output a signal without too much distortion).
209 * There is one level for 2.4 GHz band and one for 5 GHz band. These
210 * values apply to all channels within each of the bands.
211 *
212 * 2) Factory-measured power supply voltage level. This is assumed to be
213 * constant (i.e. same value applies to all channels/bands) while the
214 * factory measurements are being made.
215 *
216 * 3) Up to 8 sets of factory-measured txpower calibration values.
217 * These are for different frequency ranges, since txpower gain
218 * characteristics of the analog radio circuitry vary with frequency.
219 *
220 * Not all sets need to be filled with data;
221 * struct iwl_eeprom_calib_subband_info contains range of channels
222 * (0 if unused) for each set of data.
223 */
/* Top-level factory txpower calibration block stored in the EEPROM;
 * unused band_info[] entries have ch_from/ch_to == 0 (see above). */
struct iwl_eeprom_calib_info {
	u8 saturation_power24;	/* half-dBm (e.g. "34" = 17 dBm) */
	u8 saturation_power52;	/* half-dBm */
	__le16 voltage;		/* signed */
	struct iwl_eeprom_calib_subband_info
		band_info[EEPROM_TX_POWER_BANDS];
} __packed;
231
232
233/* General */
234#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
235#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
236#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
237#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
240#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
241#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
242#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
243#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
244
245/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
246#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
247#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
248#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
249#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
250#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
251#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
252
253#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
254#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
255
256/*
257 * Per-channel regulatory data.
258 *
259 * Each channel that *might* be supported by iwl has a fixed location
260 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
261 * txpower (MSB).
262 *
263 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
264 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
265 *
266 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
267 */
268#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
269#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
270#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
271
272/*
273 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
274 * 5.0 GHz channels 7, 8, 11, 12, 16
275 * (4915-5080MHz) (none of these is ever supported)
276 */
277#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
278#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
279
280/*
281 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
282 * (5170-5320MHz)
283 */
284#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
285#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
286
287/*
288 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
289 * (5500-5700MHz)
290 */
291#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
292#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
293
294/*
295 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
296 * (5725-5825MHz)
297 */
298#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
299#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
300
301/*
302 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
303 *
304 * The channel listed is the center of the lower 20 MHz half of the channel.
305 * The overall center frequency is actually 2 channels (10 MHz) above that,
306 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
307 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
308 * and the overall HT40 channel width centers on channel 3.
309 *
310 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
311 * control channel to which to tune. RXON also specifies whether the
312 * control channel is the upper or lower half of a HT40 channel.
313 *
314 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
315 */
316#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
317
318/*
319 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
320 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
321 */
322#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
323
324#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
325
/*
 * Per-device EEPROM hooks and layout description.
 *
 * regulatory_bands[] holds the EEPROM byte offsets of regulatory bands
 * 1..7 (see EEPROM_REGULATORY_BAND_* above); an HT40 slot set to
 * EEPROM_REGULATORY_BAND_NO_HT40 marks a band the device does not have.
 * The semaphore hooks arbitrate EEPROM access between driver and uCode.
 */
struct iwl_eeprom_ops {
	const u32 regulatory_bands[7];
	/* grab EEPROM access for the driver; negative return on failure */
	int (*acquire_semaphore) (struct iwl_priv *priv);
	/* give EEPROM access back to uCode */
	void (*release_semaphore) (struct iwl_priv *priv);
};
331
332
333int iwl_legacy_eeprom_init(struct iwl_priv *priv);
334void iwl_legacy_eeprom_free(struct iwl_priv *priv);
335const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
336 size_t offset);
337u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
338int iwl_legacy_init_channel_map(struct iwl_priv *priv);
339void iwl_legacy_free_channel_map(struct iwl_priv *priv);
340const struct iwl_channel_info *iwl_legacy_get_channel_info(
341 const struct iwl_priv *priv,
342 enum ieee80211_band band, u16 channel);
343
344#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644
index 000000000000..4e20c7e5c883
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-fh.h
@@ -0,0 +1,513 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_fh_h__
64#define __iwl_legacy_fh_h__
65
66/****************************/
67/* Flow Handler Definitions */
68/****************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH_MEM_LOWER_BOUND (0x1000)
75#define FH_MEM_UPPER_BOUND (0x2000)
76
77/**
78 * Keep-Warm (KW) buffer base address.
79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response.
90 *
91 * Bit fields:
92 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
93 */
94#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
95
96
97/**
98 * TFD Circular Buffers Base (CBBC) addresses
99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
104 * aligned (address bits 0-7 must be 0).
105 *
106 * Bit fields in each pointer register:
107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
108 */
109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
111
112/* Find TFD CB base pointer for given queue (range 0-15). */
113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
114
115
116/**
117 * Rx SRAM Control and Status Registers (RSCSR)
118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs.
126 *
127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers:
129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver).
132 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
133 * (typically 4K, although 8K or 16K are also selectable by driver).
134 * Driver sets up RB size and number of RBDs in the CB via Rx config
135 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
136 *
137 * Bit fields within one RBD:
138 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
139 *
140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0):
150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value)
153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make
160 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
161 * the circular buffer. This value should initially be 0 (before preparing any
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
171 *
172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process
174 * all filled but unprocessed RBs up to, but not including, the RB
175 * corresponding to the "read" index. For example, if "read" index becomes "1",
176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process.
178 *
179 * If read index == write index, 4965 thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254
183 * buffers waiting to be filled.
184 */
185#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
186#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
187#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
188
189/**
190 * Physical base address of 8-byte Rx Status buffer.
191 * Bit fields:
 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
193 */
194#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
195
196/**
197 * Physical base address of Rx Buffer Descriptor Circular Buffer.
198 * Bit fields:
 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
200 */
201#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
202
203/**
204 * Rx write pointer (index, really!).
205 * Bit fields:
206 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
207 * NOTE: For 256-entry circular buffer, use only bits [7:0].
208 */
209#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
210#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
211
212
213/**
214 * Rx Config/Status Registers (RCSR)
215 * Rx Config Reg for channel 0 (only channel used)
216 *
217 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
218 * normal operation (see bit fields).
219 *
220 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
221 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
222 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
223 *
224 * Bit fields:
225 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
226 * '10' operate normally
227 * 29-24: reserved
228 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
229 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
230 * 19-18: reserved
231 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
232 * '10' 12K, '11' 16K.
233 * 15-14: reserved
234 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
235 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
236 * typical value 0x10 (about 1/2 msec)
237 * 3- 0: reserved
238 */
239#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
240#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
241#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
242
243#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
244
245#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
246#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
247#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
248#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
249#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
250#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
251
252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
254#define RX_RB_TIMEOUT (0x10)
255
256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
258#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
259
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
262#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
263#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
264
265#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/**
272 * Rx Shared Status Registers (RSSR)
273 *
274 * After stopping Rx DMA channel (writing 0 to
275 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
276 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
277 *
278 * Bit fields:
279 * 24: 1 = Channel 0 is idle
280 *
281 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
282 * contain default values that should not be altered by the driver.
283 */
284#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
285#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
286
287#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
288#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
289#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
290 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
291
292#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
293
294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 *
310 * To use a Tx DMA channel, driver must initialize its
311 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
312 *
313 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
314 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
315 *
316 * All other bits should be 0.
317 *
318 * Bit fields:
319 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
320 * '10' operate normally
321 * 29- 4: Reserved, set to "0"
322 * 3: Enable internal DMA requests (1, normal operation), disable (0)
323 * 2- 0: Reserved, set to "0"
324 */
325#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
342
343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
345
346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
349
350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
353
354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
357
358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
361
362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
364
365/**
366 * Tx Shared Status Registers (TSSR)
367 *
368 * After stopping Tx DMA channel (writing 0 to
369 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
370 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
371 * (channel's buffers empty | no pending requests).
372 *
373 * Bit fields:
374 * 31-24: 1 = Channel buffers empty (channel 7:0)
375 * 23-16: 1 = No pending requests (channel 7:0)
376 */
377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessed to internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This mean that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
402
403/* Tx service channels */
404#define FH_SRVC_CHNL (9)
405#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
406#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
407#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
408 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
409
410#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
411/* Instruct FH to increment the retry count of a packet when
412 * it is brought from the memory to TX-FIFO
413 */
414#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
415
416#define RX_QUEUE_SIZE 256
417#define RX_QUEUE_MASK 255
418#define RX_QUEUE_SIZE_LOG 8
419
420/*
421 * RX related structures and functions
422 */
423#define RX_FREE_BUFFERS 64
424#define RX_LOW_WATERMARK 8
425
426/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430
/**
 * struct iwl_rb_status - receive buffer status
 * host memory mapped FH registers
 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
 * @finished_rb_num [0:11] - Indicates the index of the current RB
 *	in which the last frame was written to
 * @finished_fr_nam [0:11] - Indicates the index of the RX Frame
 *	which was transferred
 */
struct iwl_rb_status {
	__le16 closed_rb_num;
	__le16 closed_fr_num;
	__le16 finished_rb_num;
	/* NOTE(review): "nam" looks like a typo for "num", but the name is
	 * kept as-is: other files may reference this field by name. */
	__le16 finished_fr_nam;
	__le32 __unused; /* 3945 only */
} __packed;
448
449
450#define TFD_QUEUE_SIZE_MAX (256)
451#define TFD_QUEUE_SIZE_BC_DUP (64)
452#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
453#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
454#define IWL_NUM_OF_TBS 20
455
/*
 * Return the high DMA address bits [35:32] as a 4-bit value.
 *
 * The double 16-bit shift (instead of a single ">> 32") keeps the code
 * well-defined when dma_addr_t is only 32 bits wide: the sizeof() test
 * selects 0 in that case, and no shift by >= the type width is emitted.
 */
static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
{
	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
}
/**
 * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame
 * descriptor
 *
 * Holds the DMA address and length of one transmit buffer.
 *
 * @lo: low [31:0] portion of the DMA address of TX buffer;
 *	every even entry is unaligned on a 16-bit boundary
 * @hi_n_len: bits 0-3 hold the [35:32] portion of the DMA address,
 *	bits 4-15 hold the length of the tx buffer
 */
struct iwl_tfd_tb {
	__le32 lo;
	__le16 hi_n_len;
} __packed;
474
/**
 * struct iwl_tfd - Transmit Frame Descriptor (TFD)
 *
 * @ __reserved1[3] reserved
 * @ num_tbs 0-4 number of active tbs
 *	 5   reserved
 *	 6-7 padding (not used)
 * @ tbs[20]	transmit frame buffer descriptors
 * @ __pad	padding
 *
 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
 * Both driver and device share these circular buffers, each of which must be
 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
 *
 * Driver must indicate the physical address of the base of each
 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
 *
 * Each TFD contains pointer/size information for up to 20 data buffers
 * in host DRAM. These buffers collectively contain the (one) frame described
 * by the TFD. Each buffer must be a single contiguous block of memory within
 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
 * of (4K - 4). The device concatenates all of a TFD's buffers into a single
 * Tx frame, up to 8 KBytes in size.
 *
 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
 */
struct iwl_tfd {
	u8 __reserved1[3];
	u8 num_tbs;
	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
	__le32 __pad;
} __packed;
509
510/* Keep Warm Size */
511#define IWL_KW_SIZE 0x1000 /* 4k */
512
513#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644
index 000000000000..9d721cbda5bb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -0,0 +1,271 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
/**
 * iwl_legacy_get_cmd_string - map a host command ID to a printable name
 * @cmd: command ID byte, as found in a command or response header
 *
 * Used only for debug and error messages.  Each IWL_CMD() entry
 * presumably expands to a "case X: return #X;" style label (macro
 * defined elsewhere in the driver -- confirm in iwl-dev.h); any ID
 * not listed falls through to "UNKNOWN".
 */
const char *iwl_legacy_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_3945_RX);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_RATE_SCALE);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
90
91#define HOST_COMPLETE_TIMEOUT (HZ / 2)
92
/*
 * Default completion callback for asynchronous host commands that did
 * not supply their own.  Logs an error when the firmware set
 * IWL_CMD_FAILED_MSK in the reply header; otherwise, in debug builds
 * only, traces the completed command.  The chatty LINK_QUALITY and
 * SENSITIVITY replies go to the HC_DUMP debug class so they can be
 * filtered separately from ordinary host-command traffic.
 */
static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
				struct iwl_device_cmd *cmd,
				struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}
116
117static int
118iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
119{
120 int ret;
121
122 BUG_ON(!(cmd->flags & CMD_ASYNC));
123
124 /* An asynchronous command can not expect an SKB to be set. */
125 BUG_ON(cmd->flags & CMD_WANT_SKB);
126
127 /* Assign a generic callback if one is not provided */
128 if (!cmd->callback)
129 cmd->callback = iwl_legacy_generic_cmd_callback;
130
131 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
132 return -EBUSY;
133
134 ret = iwl_legacy_enqueue_hcmd(priv, cmd);
135 if (ret < 0) {
136 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
137 iwl_legacy_get_cmd_string(cmd->id), ret);
138 return ret;
139 }
140 return 0;
141}
142
/**
 * iwl_legacy_send_cmd_sync - send a host command and wait for completion
 * @priv: driver private data
 * @cmd: command to send; must not carry CMD_ASYNC or a callback
 *
 * Serialized by priv->sync_cmd_mutex, so only one synchronous command
 * is in flight at a time.  Waits up to HOST_COMPLETE_TIMEOUT for the
 * response path to clear STATUS_HCMD_ACTIVE.
 *
 * Returns 0 on success; -ETIMEDOUT on timeout, -ECANCELED on RF-kill,
 * -EIO on firmware error or missing expected reply, or the negative
 * error from iwl_legacy_enqueue_hcmd().
 */
int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
			iwl_legacy_get_cmd_string(cmd->id));
	mutex_lock(&priv->sync_cmd_mutex);

	/* Mark a command in flight; cleared by the response handler. */
	set_bit(STATUS_HCMD_ACTIVE, &priv->status);
	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
			iwl_legacy_get_cmd_string(cmd->id));

	cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_legacy_get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* ret == 0 means the timeout elapsed; re-check the flag because
	 * the response may have raced with the timeout. */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERR(priv,
				"Error sending %s: time out after %dms.\n",
				iwl_legacy_get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			IWL_DEBUG_INFO(priv,
				"Clearing HCMD_ACTIVE for command %s\n",
				iwl_legacy_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
			iwl_legacy_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			iwl_legacy_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
			iwl_legacy_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	/* Free the reply page on every failure path that may own one. */
	if (cmd->reply_page) {
		iwl_legacy_free_pages(priv, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	mutex_unlock(&priv->sync_cmd_mutex);
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
231
232int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
233{
234 if (cmd->flags & CMD_ASYNC)
235 return iwl_legacy_send_cmd_async(priv, cmd);
236
237 return iwl_legacy_send_cmd_sync(priv, cmd);
238}
239EXPORT_SYMBOL(iwl_legacy_send_cmd);
240
241int
242iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
243{
244 struct iwl_host_cmd cmd = {
245 .id = id,
246 .len = len,
247 .data = data,
248 };
249
250 return iwl_legacy_send_cmd_sync(priv, &cmd);
251}
252EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
253
254int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
255 u8 id, u16 len, const void *data,
256 void (*callback)(struct iwl_priv *priv,
257 struct iwl_device_cmd *cmd,
258 struct iwl_rx_packet *pkt))
259{
260 struct iwl_host_cmd cmd = {
261 .id = id,
262 .len = len,
263 .data = data,
264 };
265
266 cmd.flags |= CMD_ASYNC;
267 cmd.callback = callback;
268
269 return iwl_legacy_send_cmd_async(priv, &cmd);
270}
271EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644
index 000000000000..02132e755831
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -0,0 +1,181 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_legacy_helpers_h__
31#define __iwl_legacy_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
39
40
/* Return a pointer to the mac80211 configuration embedded in @hw. */
static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
	struct ieee80211_hw *hw)
{
	return &hw->conf;
}
46
/**
 * iwl_legacy_queue_inc_wrap - advance a queue index by one, with wrap-around
 * @index: current index
 * @n_bd: total number of entries in queue (must be power of 2)
 *
 * Because @n_bd is a power of two, masking with (n_bd - 1) performs the
 * modulo, so n_bd - 1 wraps back to 0.
 */
static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
{
	return (index + 1) & (n_bd - 1);
}
56
/**
 * iwl_legacy_queue_dec_wrap - step a queue index back by one, with wrap-around
 * @index: current index
 * @n_bd: total number of entries in queue (must be power of 2)
 *
 * Masking with (n_bd - 1) makes index 0 wrap to n_bd - 1.
 */
static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
{
	return (index - 1) & (n_bd - 1);
}
66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
/* TODO: Move fw_desc functions to iwl-pci.ko */
/*
 * Release the coherent DMA buffer backing a firmware image descriptor
 * (if any) and reset the descriptor so it is safe to free again.
 */
static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
				    struct fw_desc *desc)
{
	if (desc->v_addr)
		dma_free_coherent(&pci_dev->dev, desc->len,
				  desc->v_addr, desc->p_addr);
	/* clear even if nothing was mapped, so the descriptor is inert */
	desc->v_addr = NULL;
	desc->len = 0;
}
77
78static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
/*
 * we have 8 bits used like this:
 *
 * 7 6 5 4 3 2 1 0
 * | | | | | | | |
 * | | | | | | +-+-------- AC queue (0-3)
 * | | | | | |
 * | +-+-+-+-+------------ HW queue ID
 * |
 * +---------------------- unused
 */
/* Encode the (AC, HW queue) pair into txq->swq_id per the layout above. */
static inline void
iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);   /* only have 2 bits */
	BUG_ON(hwq > 31); /* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}
110
/*
 * Wake the mac80211 AC queue encoded in txq->swq_id, but only if this HW
 * queue was previously marked stopped and it was the last stopped HW
 * queue mapped to that AC (tracked via the per-AC stop counter).
 */
static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
				  struct iwl_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;		/* low 2 bits: AC (0-3) */
	u8 hwq = (queue >> 2) & 0x1f;	/* next 5 bits: HW queue ID */

	if (test_and_clear_bit(hwq, priv->queue_stopped))
		if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
			ieee80211_wake_queue(priv->hw, ac);
}
122
/*
 * Stop the mac80211 AC queue encoded in txq->swq_id. The per-AC counter
 * tracks how many HW queues mapped to the AC are stopped; mac80211 is
 * only told to stop on the transition driven by this HW queue's bit.
 */
static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
				  struct iwl_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;		/* low 2 bits: AC (0-3) */
	u8 hwq = (queue >> 2) & 0x1f;	/* next 5 bits: HW queue ID */

	if (!test_and_set_bit(hwq, priv->queue_stopped))
		if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
			ieee80211_stop_queue(priv->hw, ac);
}
134
135#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
136#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
137
/*
 * Mask all device interrupts and acknowledge anything already pending,
 * clearing the STATUS_INT_ENABLED software flag first so the ISR knows
 * interrupts are intentionally off.
 */
static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
{
	clear_bit(STATUS_INT_ENABLED, &priv->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(priv, CSR_INT, 0xffffffff);
	iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
}
151
/*
 * Re-enable device interrupts: set the software enabled flag, then
 * program the previously saved interrupt mask into CSR_INT_MASK.
 */
static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
{
	IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &priv->status);
	iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
}
158
159/**
160 * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
161 * @priv -- pointer to iwl_priv data structure
162 * @tsf_bits -- number of bits need to shift for masking)
163 */
164static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
165 u16 tsf_bits)
166{
167 return (1 << tsf_bits) - 1;
168}
169
170/**
171 * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
172 * @priv -- pointer to iwl_priv data structure
173 * @tsf_bits -- number of bits need to shift for masking)
174 */
175static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
176 u16 tsf_bits)
177{
178 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
179}
180
181#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644
index 000000000000..5cc5d342914f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-io.h
@@ -0,0 +1,545 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_io_h__
30#define __iwl_legacy_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h"
37
38/*
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
43 * A single _ prefix before a an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
57 * _iwl_legacy_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
/* Raw 8-bit MMIO write at offset @ofs from the mapped BAR, with tracing.
 * No state check; see the naming-convention comment at the top of file. */
static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
{
	trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
	iowrite8(val, priv->hw_base + ofs);
}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs caller file/line before performing the write. */
static inline void
__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
				 u32 ofs, u8 val)
{
	IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
	_iwl_legacy_write8(priv, ofs, val);
}
#define iwl_write8(priv, ofs, val) \
	__iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
#else
#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
#endif
85
86
/* Raw 32-bit MMIO write at offset @ofs from the mapped BAR, with tracing. */
static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
{
	trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
	iowrite32(val, priv->hw_base + ofs);
}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs caller file/line before performing the write. */
static inline void
__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
				 u32 ofs, u32 val)
{
	IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
	_iwl_legacy_write32(priv, ofs, val);
}
#define iwl_write32(priv, ofs, val) \
	__iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
#else
#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
#endif
106
107static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
108{
109 u32 val = ioread32(priv->hw_base + ofs);
110 trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
111 return val;
112}
113
114#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
115static inline u32
116__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
117{
118 IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
119 return _iwl_legacy_read32(priv, ofs);
120}
121#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
122#else
123#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
124#endif
125
#define IWL_POLL_INTERVAL 10	/* microseconds */
/*
 * Busy-poll a register until (value & @mask) matches (@bits & @mask),
 * sampling every IWL_POLL_INTERVAL microseconds for up to @timeout us.
 * Returns the elapsed time on success or -ETIMEDOUT.
 */
static inline int
_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
		u32 bits, u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
			return t;
		udelay(IWL_POLL_INTERVAL);
		t += IWL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs the poll result plus caller file/line. */
static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
				 struct iwl_priv *priv, u32 addr,
				 u32 bits, u32 mask, int timeout)
{
	int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
	IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
			addr, bits, mask,
			unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
	return ret;
}
#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
	__iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
	bits, mask, timeout)
#else
#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
#endif
159
/* Read-modify-write: OR @mask into register @reg (no locking). */
static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
{
	_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs the resulting register value. */
static inline void __iwl_legacy_set_bit(const char *f, u32 l,
				 struct iwl_priv *priv, u32 reg, u32 mask)
{
	u32 val = _iwl_legacy_read32(priv, reg) | mask;
	IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
							mask, val);
	_iwl_legacy_write32(priv, reg, val);
}
/* Locked entry point: makes the read-modify-write atomic w.r.t. reg_lock. */
static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	__iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#else
/* Locked entry point: makes the read-modify-write atomic w.r.t. reg_lock. */
static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_iwl_legacy_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#endif
191
/* Read-modify-write: clear @mask bits in register @reg (no locking). */
static inline void
_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
{
	_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs the resulting register value. */
static inline void
__iwl_legacy_clear_bit(const char *f, u32 l,
			struct iwl_priv *priv, u32 reg, u32 mask)
{
	u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
	IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
	_iwl_legacy_write32(priv, reg, val);
}
/* Locked entry point: makes the read-modify-write atomic w.r.t. reg_lock. */
static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	__iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#else
/* Locked entry point: makes the read-modify-write atomic w.r.t. reg_lock. */
static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_iwl_legacy_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#endif
224
/*
 * Request MAC access (wake the NIC) and wait for the MAC clock to be
 * ready. Returns 0 on success; on timeout forces an NMI via CSR_RESET
 * and returns -EIO. Caller must hold priv->reg_lock and balance with
 * iwl_release_nic_access().
 */
static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored. We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 */
	ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (ret < 0) {
		val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
		IWL_ERR(priv,
			"MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
		_iwl_legacy_write32(priv, CSR_RESET,
				CSR_RESET_REG_FLAG_FORCE_NMI);
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs caller file/line before grabbing access. */
static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
					       struct iwl_priv *priv)
{
	IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
	return _iwl_legacy_grab_nic_access(priv);
}
#define iwl_grab_nic_access(priv) \
	__iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
#else
#define iwl_grab_nic_access(priv) \
	_iwl_legacy_grab_nic_access(priv)
#endif
280
/* Drop the MAC access request taken by iwl_grab_nic_access(), allowing
 * the NIC to go back to sleep. */
static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
{
	_iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs caller file/line before releasing access. */
static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
					    struct iwl_priv *priv)
{

	IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
	_iwl_legacy_release_nic_access(priv);
}
#define iwl_release_nic_access(priv) \
	__iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
#else
#define iwl_release_nic_access(priv) \
	_iwl_legacy_release_nic_access(priv)
#endif
300
/* "Direct" read: same as a raw read32; kept as a distinct name for the
 * grab-access family of accessors. */
static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	return _iwl_legacy_read32(priv, reg);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs value read plus caller file/line. */
static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
					struct iwl_priv *priv, u32 reg)
{
	u32 value = _iwl_legacy_read_direct32(priv, reg);
	IWL_DEBUG_IO(priv,
		"read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
		     f, l);
	return value;
}
/*
 * Locked entry point: grabs NIC access around the read.
 * NOTE(review): the return value of iwl_grab_nic_access() is ignored
 * here, unlike the write paths below which skip the access on failure --
 * confirm this asymmetry is intentional.
 */
static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;
}

#else
/* Locked entry point: grabs NIC access around the read (see note above
 * about the ignored grab return value). */
static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	value = _iwl_legacy_read_direct32(priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;

}
#endif
343
/* "Direct" write: same as a raw write32; kept as a distinct name for the
 * grab-access family of accessors. */
static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
					 u32 reg, u32 value)
{
	_iwl_legacy_write32(priv, reg, value);
}
/* Locked entry point: the write is only performed if NIC access was
 * successfully grabbed. */
static inline void
iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_direct32(priv, reg, value);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
361
362static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
363 u32 reg, u32 len, u32 *values)
364{
365 u32 count = sizeof(u32);
366
367 if ((priv != NULL) && (values != NULL)) {
368 for (; 0 < len; len -= count, reg += count, values++)
369 iwl_legacy_write_direct32(priv, reg, *values);
370 }
371}
372
/*
 * Busy-poll a register (via the locked direct-read path) until all bits
 * in @mask are set, for up to @timeout microseconds. Returns elapsed
 * time on success or -ETIMEDOUT.
 */
static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
				       u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
			return t;
		udelay(IWL_POLL_INTERVAL);
		t += IWL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug variant: logs success value or timeout with caller file/line. */
static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
					    struct iwl_priv *priv,
					    u32 addr, u32 mask, int timeout)
{
	int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);

	if (unlikely(ret == -ETIMEDOUT))
		IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
			     "timedout - %s %d\n", addr, mask, f, l);
	else
		IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
			     "- %s %d\n", addr, mask, ret, f, l);
	return ret;
}
#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
#else
#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
#endif
408
/*
 * Read a periphery register through the HBUS indirection pair: write the
 * target address (with the (3 << 24) access-mode bits -- presumably
 * autoincrement/access control; TODO confirm against CSR docs), then
 * read the data port. rmb() orders the address write before the read.
 */
static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
{
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
	rmb();
	return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
}
/* Locked entry point: grabs NIC access around the indirect read. */
static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	val = _iwl_legacy_read_prph(priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return val;
}
427
/*
 * Write a periphery register through the HBUS indirection pair: write
 * the target address (low 16 bits, plus the (3 << 24) access-mode bits),
 * then the data port. wmb() orders the address write before the data.
 */
static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
					     u32 addr, u32 val)
{
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
			      ((addr & 0x0000FFFF) | (3 << 24)));
	wmb();
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
}

/* Locked entry point: the write only happens if NIC access was grabbed. */
static inline void
iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_prph(priv, addr, val);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
449
/* Read-modify-write on a periphery register: OR @mask in (no locking). */
#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))

/* Locked entry point for the periphery set-bits RMW. */
static inline void
iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	_iwl_legacy_set_bits_prph(priv, reg, mask);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
464
/* Periphery RMW: keep only the bits in @mask, then OR in @bits. */
#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
_iwl_legacy_write_prph(priv, reg, \
		 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))

/* Locked entry point for the masked periphery RMW. */
static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
				u32 bits, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	_iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
480
/* Locked periphery RMW: clear the bits in @mask. */
static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
						 *priv, u32 reg, u32 mask)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	val = _iwl_legacy_read_prph(priv, reg);
	_iwl_legacy_write_prph(priv, reg, (val & ~mask));
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
494
/*
 * Read one 32-bit word of device (target) memory at @addr through the
 * HBUS target-memory indirection pair, under reg_lock with NIC access.
 */
static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);

	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
	rmb();	/* address write must land before the data read */
	value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;
}
511
/*
 * Write one 32-bit word of device (target) memory at @addr through the
 * HBUS indirection pair; skipped entirely if NIC access cannot be grabbed.
 */
static inline void
iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
		wmb();	/* address write must land before the data write */
		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
526
/*
 * Write @len bytes of 32-bit words to device (target) memory starting at
 * @addr. The data port auto-advances after each word written (only the
 * start address is programmed). Skipped if NIC access cannot be grabbed.
 */
static inline void
iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
					  u32 len, u32 *values)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
		wmb();	/* address write must land before the data writes */
		for (; 0 < len; len -= sizeof(u32), values++)
			_iwl_legacy_write_direct32(priv,
					HBUS_TARG_MEM_WDAT, *values);

		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
545#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644
index 000000000000..15eb8b707157
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -0,0 +1,188 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44
45/* default: IWL_LED_BLINK(0) using blinking index table */
46static int led_mode;
47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "1=On(RF On)/Off(RF Off), 2=blinking");
50
/* Throughput-to-blink-rate table for the mac80211 tpt LED trigger:
 * higher throughput selects a shorter blink_time (faster blinking).
 * Throughput thresholds appear to be in units of 1024 (presumably kbps
 * per the mac80211 tpt trigger convention -- TODO confirm). */
static const struct ieee80211_tpt_blink iwl_blink[] = {
	{ .throughput = 0 * 1024 - 1, .blink_time = 334 },
	{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
	{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
	{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
	{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
	{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
	{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
	{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
	{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
	{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
63
64/*
65 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
66 * Led blink rate analysis showed an average deviation of 0% on 3945,
67 * 5% on 4965 HW.
68 * Need to compensate on the led on/off time per HW according to the deviation
69 * to achieve the desired led frequency
70 * The calculation is: (100-averageDeviation)/100 * blinkTime
71 * For code efficiency the calculation will be:
72 * compensation = (100 - averageDeviation) * 64 / 100
73 * NewBlinkTime = (compensation * BlinkTime) / 64
74 */
75static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
76 u8 time, u16 compensation)
77{
78 if (!compensation) {
79 IWL_ERR(priv, "undefined blink compensation: "
80 "use pre-defined blinking time\n");
81 return time;
82 }
83
84 return (u8)((time * compensation) >> 6);
85}
86
/*
 * Send the LED pattern command to the firmware via the per-device
 * led->cmd op, after applying the HW blink-time compensation.
 * Returns -EBUSY before the device is ready, 0 if the requested pattern
 * is already programmed (cached in blink_on/blink_off), otherwise the
 * op's result; the cache is updated only on success.
 */
static int iwl_legacy_led_cmd(struct iwl_priv *priv,
		       unsigned long on,
		       unsigned long off)
{
	struct iwl_led_cmd led_cmd = {
		.id = IWL_LED_LINK,
		.interval = IWL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(STATUS_READY, &priv->status))
		return -EBUSY;

	if (priv->blink_on == on && priv->blink_off == off)
		return 0;

	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
			priv->cfg->base_params->led_compensation);
	led_cmd.on = iwl_legacy_blink_compensation(priv, on,
				priv->cfg->base_params->led_compensation);
	led_cmd.off = iwl_legacy_blink_compensation(priv, off,
				priv->cfg->base_params->led_compensation);

	ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
	if (!ret) {
		priv->blink_on = on;
		priv->blink_off = off;
	}
	return ret;
}
118
119static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
120 enum led_brightness brightness)
121{
122 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
123 unsigned long on = 0;
124
125 if (brightness > 0)
126 on = IWL_LED_SOLID;
127
128 iwl_legacy_led_cmd(priv, on, 0);
129}
130
/* LED-class blink callback: forward the requested on/off times to the
 * firmware LED command path unchanged. */
static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
			     unsigned long *delay_on,
			     unsigned long *delay_off)
{
	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);

	return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
}
139
140void iwl_legacy_leds_init(struct iwl_priv *priv)
141{
142 int mode = led_mode;
143 int ret;
144
145 if (mode == IWL_LED_DEFAULT)
146 mode = priv->cfg->led_mode;
147
148 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
149 wiphy_name(priv->hw->wiphy));
150 priv->led.brightness_set = iwl_legacy_led_brightness_set;
151 priv->led.blink_set = iwl_legacy_led_blink_set;
152 priv->led.max_brightness = 1;
153
154 switch (mode) {
155 case IWL_LED_DEFAULT:
156 WARN_ON(1);
157 break;
158 case IWL_LED_BLINK:
159 priv->led.default_trigger =
160 ieee80211_create_tpt_led_trigger(priv->hw,
161 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
162 iwl_blink, ARRAY_SIZE(iwl_blink));
163 break;
164 case IWL_LED_RF_STATE:
165 priv->led.default_trigger =
166 ieee80211_get_radio_led_name(priv->hw);
167 break;
168 }
169
170 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
171 if (ret) {
172 kfree(priv->led.name);
173 return;
174 }
175
176 priv->led_registered = true;
177}
178EXPORT_SYMBOL(iwl_legacy_leds_init);
179
180void iwl_legacy_leds_exit(struct iwl_priv *priv)
181{
182 if (!priv->led_registered)
183 return;
184
185 led_classdev_unregister(&priv->led);
186 kfree(priv->led.name);
187}
188EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644
index 000000000000..f0791f70f79d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.h
@@ -0,0 +1,56 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_leds_h__
28#define __iwl_legacy_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwl_legacy_leds_init(struct iwl_priv *priv);
54void iwl_legacy_leds_exit(struct iwl_priv *priv);
55
56#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644
index 000000000000..38647e481eb0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
@@ -0,0 +1,456 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_rs_h__
28#define __iwl_legacy_rs_h__
29
/*
 * One entry of the rate table (4965 and later): uCode PLCP codes for
 * legacy/SISO/MIMO2 transmission, the IEEE (MAC header) rate value,
 * and prev/next links so the rate-scale algorithm can walk the rate
 * ladder both in IEEE speed order and in its own search order.
 */
struct iwl_rate_info {
	u8 plcp;	/* uCode API: IWL_RATE_6M_PLCP, etc. */
	u8 plcp_siso;	/* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
	u8 plcp_mimo2;	/* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
	u8 ieee;	/* MAC header: IWL_RATE_6M_IEEE, etc. */
	u8 prev_ieee;	/* previous rate in IEEE speeds */
	u8 next_ieee;	/* next rate in IEEE speeds */
	u8 prev_rs;	/* previous rate used in rs algo */
	u8 next_rs;	/* next rate used in rs algo */
	u8 prev_rs_tgg;	/* previous rate used in TGG rs algo */
	u8 next_rs_tgg;	/* next rate used in TGG rs algo */
};
42
/*
 * 3945 variant of the rate table entry: no HT (SISO/MIMO) PLCP codes,
 * but adds the index/link used by the 3945 rate scale table command.
 */
struct iwl3945_rate_info {
	u8 plcp;		/* uCode API: IWL_RATE_6M_PLCP, etc. */
	u8 ieee;		/* MAC header: IWL_RATE_6M_IEEE, etc. */
	u8 prev_ieee;		/* previous rate in IEEE speeds */
	u8 next_ieee;		/* next rate in IEEE speeds */
	u8 prev_rs;		/* previous rate used in rs algo */
	u8 next_rs;		/* next rate used in rs algo */
	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
	u8 table_rs_index;	/* index in rate scale table cmd */
	u8 prev_table_rs;	/* prev in rate table cmd */
};
55
56
/*
 * These serve as indexes into
 * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
 *
 * CCK rates (1/2/5.5/11 Mbps) come first, then the OFDM rates.
 */
enum {
	IWL_RATE_1M_INDEX = 0,
	IWL_RATE_2M_INDEX,
	IWL_RATE_5M_INDEX,		/* 5.5 Mbps */
	IWL_RATE_11M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX,
	IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX,
	IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX,
	IWL_RATE_54M_INDEX,
	IWL_RATE_60M_INDEX,		/* HT-only rate, see COUNT_LEGACY */
	IWL_RATE_COUNT,
	IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1,	/* Excluding 60M */
	IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
	IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
	IWL_RATE_INVALID = IWL_RATE_COUNT,
};

/*
 * Alternate index order (OFDM first, then CCK) — presumably the order
 * used inside the uCode rate-scale table command; confirm against
 * iwl-commands.h users.
 */
enum {
	IWL_RATE_6M_INDEX_TABLE = 0,
	IWL_RATE_9M_INDEX_TABLE,
	IWL_RATE_12M_INDEX_TABLE,
	IWL_RATE_18M_INDEX_TABLE,
	IWL_RATE_24M_INDEX_TABLE,
	IWL_RATE_36M_INDEX_TABLE,
	IWL_RATE_48M_INDEX_TABLE,
	IWL_RATE_54M_INDEX_TABLE,
	IWL_RATE_1M_INDEX_TABLE,
	IWL_RATE_2M_INDEX_TABLE,
	IWL_RATE_5M_INDEX_TABLE,
	IWL_RATE_11M_INDEX_TABLE,
	IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
};

/* Range bounds over the index enum above; 3945 has no 60M rate */
enum {
	IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
	IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
	IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
	IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
	IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
};
105
/* #define vs. enum to keep from defaulting to 'large integer' */
/* Single-rate bitmask for each index; combined into the composite
 * IWL_*_RATES_MASK values below */
#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
/* uCode API values for legacy bit rates, both OFDM and CCK */
enum {
	IWL_RATE_6M_PLCP = 13,
	IWL_RATE_9M_PLCP = 15,
	IWL_RATE_12M_PLCP = 5,
	IWL_RATE_18M_PLCP = 7,
	IWL_RATE_24M_PLCP = 9,
	IWL_RATE_36M_PLCP = 11,
	IWL_RATE_48M_PLCP = 1,
	IWL_RATE_54M_PLCP = 3,
	IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
	IWL_RATE_1M_PLCP = 10,
	IWL_RATE_2M_PLCP = 20,
	IWL_RATE_5M_PLCP = 55,
	IWL_RATE_11M_PLCP = 110,
	/*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
};

/* uCode API values for OFDM high-throughput (HT) bit rates.
 * MIMO2 codes are the SISO codes with bit 3 set. */
enum {
	IWL_RATE_SISO_6M_PLCP = 0,
	IWL_RATE_SISO_12M_PLCP = 1,
	IWL_RATE_SISO_18M_PLCP = 2,
	IWL_RATE_SISO_24M_PLCP = 3,
	IWL_RATE_SISO_36M_PLCP = 4,
	IWL_RATE_SISO_48M_PLCP = 5,
	IWL_RATE_SISO_54M_PLCP = 6,
	IWL_RATE_SISO_60M_PLCP = 7,
	IWL_RATE_MIMO2_6M_PLCP = 0x8,
	IWL_RATE_MIMO2_12M_PLCP = 0x9,
	IWL_RATE_MIMO2_18M_PLCP = 0xa,
	IWL_RATE_MIMO2_24M_PLCP = 0xb,
	IWL_RATE_MIMO2_36M_PLCP = 0xc,
	IWL_RATE_MIMO2_48M_PLCP = 0xd,
	IWL_RATE_MIMO2_54M_PLCP = 0xe,
	IWL_RATE_MIMO2_60M_PLCP = 0xf,
	IWL_RATE_SISO_INVM_PLCP,
	IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
};

/* MAC header values for bit rates, in units of 500 kb/s
 * (e.g. 12 -> 6 Mbps, 11 -> 5.5 Mbps) */
enum {
	IWL_RATE_6M_IEEE = 12,
	IWL_RATE_9M_IEEE = 18,
	IWL_RATE_12M_IEEE = 24,
	IWL_RATE_18M_IEEE = 36,
	IWL_RATE_24M_IEEE = 48,
	IWL_RATE_36M_IEEE = 72,
	IWL_RATE_48M_IEEE = 96,
	IWL_RATE_54M_IEEE = 108,
	IWL_RATE_60M_IEEE = 120,
	IWL_RATE_1M_IEEE = 2,
	IWL_RATE_2M_IEEE = 4,
	IWL_RATE_5M_IEEE = 11,
	IWL_RATE_11M_IEEE = 22,
};
177
/* Composite rate masks: "basic" masks hold the mandatory rates of each
 * modulation family; the full masks add the optional rates. */
#define IWL_CCK_BASIC_RATES_MASK \
	(IWL_RATE_1M_MASK | \
	IWL_RATE_2M_MASK)

#define IWL_CCK_RATES_MASK \
	(IWL_CCK_BASIC_RATES_MASK | \
	IWL_RATE_5M_MASK | \
	IWL_RATE_11M_MASK)

#define IWL_OFDM_BASIC_RATES_MASK \
	(IWL_RATE_6M_MASK | \
	IWL_RATE_12M_MASK | \
	IWL_RATE_24M_MASK)

#define IWL_OFDM_RATES_MASK \
	(IWL_OFDM_BASIC_RATES_MASK | \
	IWL_RATE_9M_MASK | \
	IWL_RATE_18M_MASK | \
	IWL_RATE_36M_MASK | \
	IWL_RATE_48M_MASK | \
	IWL_RATE_54M_MASK)

#define IWL_BASIC_RATES_MASK \
	(IWL_OFDM_BASIC_RATES_MASK | \
	IWL_CCK_BASIC_RATES_MASK)

#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)

#define IWL_INVALID_VALUE -1

#define IWL_MIN_RSSI_VAL -100
#define IWL_MAX_RSSI_VAL 0

/* These values specify how many Tx frame attempts before
 * searching for a new modulation mode */
#define IWL_LEGACY_FAILURE_LIMIT	160
#define IWL_LEGACY_SUCCESS_LIMIT	480
#define IWL_LEGACY_TABLE_COUNT		160

#define IWL_NONE_LEGACY_FAILURE_LIMIT	400
#define IWL_NONE_LEGACY_SUCCESS_LIMIT	4500
#define IWL_NONE_LEGACY_TABLE_COUNT	1500

/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
#define IWL_RS_GOOD_RATIO		12800	/* 100% */
#define IWL_RATE_SCALE_SWITCH		10880	/* 85% */
#define IWL_RATE_HIGH_TH		10880	/* 85% */
#define IWL_RATE_INCREASE_TH		6400	/* 50% */
#define IWL_RATE_DECREASE_TH		1920	/* 15% */

/* possible actions when in legacy mode */
#define IWL_LEGACY_SWITCH_ANTENNA1      0
#define IWL_LEGACY_SWITCH_ANTENNA2      1
#define IWL_LEGACY_SWITCH_SISO          2
#define IWL_LEGACY_SWITCH_MIMO2_AB      3
#define IWL_LEGACY_SWITCH_MIMO2_AC      4
#define IWL_LEGACY_SWITCH_MIMO2_BC      5

/* possible actions when in siso mode */
#define IWL_SISO_SWITCH_ANTENNA1        0
#define IWL_SISO_SWITCH_ANTENNA2        1
#define IWL_SISO_SWITCH_MIMO2_AB        2
#define IWL_SISO_SWITCH_MIMO2_AC        3
#define IWL_SISO_SWITCH_MIMO2_BC        4
#define IWL_SISO_SWITCH_GI              5

/* possible actions when in mimo mode */
#define IWL_MIMO2_SWITCH_ANTENNA1       0
#define IWL_MIMO2_SWITCH_ANTENNA2       1
#define IWL_MIMO2_SWITCH_SISO_A         2
#define IWL_MIMO2_SWITCH_SISO_B         3
#define IWL_MIMO2_SWITCH_SISO_C         4
#define IWL_MIMO2_SWITCH_GI             5

/* highest action value usable as a search bound */
#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI

#define IWL_ACTION_LIMIT		3	/* # possible actions */

#define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */

/* load per tid defines for A-MPDU activation */
/* NOTE(review): "TPT_THREHOLD" is a typo for "THRESHOLD" — the name is
 * kept because other files may reference it */
#define IWL_AGG_TPT_THREHOLD	0
#define IWL_AGG_LOAD_THRESHOLD	10
#define IWL_AGG_ALL_TID		0xff
#define TID_QUEUE_CELL_SPACING	50	/*mS */
#define TID_QUEUE_MAX_SIZE	20
#define TID_ROUND_VALUE		5	/* mS */
#define TID_MAX_LOAD_COUNT	8

#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
/* Elapsed time between two jiffies-style timestamps, tolerating wrap.
 * NOTE: function-like macro; evaluates both arguments more than once */
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))

/* Per-rate info table, defined once in the rate-scale .c file */
extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
272
/* Modulation class of a rate-scale table entry */
enum iwl_table_type {
	LQ_NONE,
	LQ_G,		/* legacy types */
	LQ_A,
	LQ_SISO,	/* high-throughput types */
	LQ_MIMO2,
	LQ_MAX,
};

/* Classification helpers over enum iwl_table_type */
#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
#define is_siso(tbl) ((tbl) == LQ_SISO)
#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
#define is_mimo(tbl) (is_mimo2(tbl))
#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
#define is_a_band(tbl) ((tbl) == LQ_A)
#define is_g_and(tbl) ((tbl) == LQ_G)

/* Antenna selection bitmasks (A/B/C chains) */
#define	ANT_NONE	0x0
#define	ANT_A		BIT(0)
#define	ANT_B		BIT(1)
#define	ANT_AB		(ANT_A | ANT_B)
#define ANT_C		BIT(2)
#define	ANT_AC		(ANT_A | ANT_C)
#define ANT_BC		(ANT_B | ANT_C)
#define ANT_ABC		(ANT_AB | ANT_C)

/* buffer size for the debugfs rate/MCS display strings */
#define IWL_MAX_MCS_DISPLAY_SIZE	12
300
/* Printable rate description — presumably filled for debugfs display;
 * confirm against the rate-scale debugfs code */
struct iwl_rate_mcs_info {
	char	mbps[IWL_MAX_MCS_DISPLAY_SIZE];
	char	mcs[IWL_MAX_MCS_DISPLAY_SIZE];
};

/**
 * struct iwl_rate_scale_data -- tx success history for one rate
 */
struct iwl_rate_scale_data {
	u64 data;		/* bitmap of successful frames */
	s32 success_counter;	/* number of frames successful */
	s32 success_ratio;	/* per-cent * 128  */
	s32 counter;		/* number of frames attempted */
	s32 average_tpt;	/* success ratio * expected throughput */
	unsigned long stamp;
};
317
/**
 * struct iwl_scale_tbl_info -- tx params and success history for all rates
 *
 * There are two of these in struct iwl_lq_sta,
 * one for "active", and one for "search".
 */
struct iwl_scale_tbl_info {
	enum iwl_table_type lq_type;
	u8 ant_type;
	u8 is_SGI;	/* 1 = short guard interval */
	u8 is_ht40;	/* 1 = 40 MHz channel width */
	u8 is_dup;	/* 1 = duplicated data streams */
	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
	u8 max_search;	/* maximum number of tables we can search */
	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
	u32 current_rate;  /* rate_n_flags, uCode API format */
	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};

/* Per-TID traffic statistics kept as a circular buffer of time slices,
 * used to decide A-MPDU aggregation (see IWL_AGG_* thresholds above) */
struct iwl_traffic_load {
	unsigned long time_stamp;	/* age of the oldest statistics */
	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
						 * slice */
	u32 total;			/* total num of packets during the
					 * last TID_MAX_TIME_DIFF */
	u8 queue_count;			/* number of queues that has
					 * been used since the last cleanup */
	u8 head;			/* start of the circular buffer */
};
347
/**
 * struct iwl_lq_sta -- driver's rate scaling private structure
 *
 * Pointer to this gets passed back and forth between driver and mac80211.
 */
struct iwl_lq_sta {
	u8 active_tbl;		/* index of active table, range 0-1 */
	u8 enable_counter;	/* indicates HT mode */
	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
	u8 search_better_tbl;	/* 1: currently trying alternate mode */
	s32 last_tpt;

	/* The following determine when to search for a new mode */
	u32 table_count_limit;
	u32 max_failure_limit;	/* # failed frames before new search */
	u32 max_success_limit;	/* # successful frames before new search */
	u32 table_count;
	u32 total_failed;	/* total failed frames, any/all rates */
	u32 total_success;	/* total successful frames, any/all rates */
	u64 flush_timer;	/* time staying in mode before new search */

	u8 action_counter;	/* # mode-switch actions tried */
	u8 is_green;
	u8 is_dup;
	enum ieee80211_band band;

	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
	u32 supp_rates;
	u16 active_legacy_rate;
	u16 active_siso_rate;
	u16 active_mimo2_rate;
	s8 max_rate_idx;	/* Max rate set by user */
	u8 missed_rate_counter;

	struct iwl_link_quality_cmd lq;
	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
	struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
	u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
	/* debugfs entries owned by the rate-scale module */
	struct dentry *rs_sta_dbgfs_scale_table_file;
	struct dentry *rs_sta_dbgfs_stats_table_file;
	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
	u32 dbg_fixed_rate;	/* presumably pins tx to one rate when set;
				 * confirm against debugfs handler */
#endif
	struct iwl_priv *drv;	/* back-pointer to the owning driver */

	/* used to be in sta_info */
	int last_txrate_idx;
	/* last tx rate_n_flags */
	u32 last_rate_n_flags;
	/* packets destined for this STA are aggregated */
	u8 is_agg;
};
402
403static inline u8 iwl4965_num_of_ant(u8 mask)
404{
405 return !!((mask) & ANT_A) +
406 !!((mask) & ANT_B) +
407 !!((mask) & ANT_C);
408}
409
410static inline u8 iwl4965_first_antenna(u8 mask)
411{
412 if (mask & ANT_A)
413 return ANT_A;
414 if (mask & ANT_B)
415 return ANT_B;
416 return ANT_C;
417}
418
419
/**
 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
 *
 * The specific throughput table used is based on the type of network
 * the associated with, including A, B, G, and G w/ TGG protection
 */
extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);

/* Initialize station's rate scaling information after adding station */
extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
			 struct ieee80211_sta *sta, u8 sta_id);
extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
			 struct ieee80211_sta *sta, u8 sta_id);

/**
 * iwl_rate_control_register - Register the rate control algorithm callbacks
 *
 * Since the rate control algorithm is hardware specific, there is no need
 * or reason to place it as a stand alone module.  The driver can call
 * iwl_rate_control_register in order to register the rate control callbacks
 * with the mac80211 subsystem.  This should be performed prior to calling
 * ieee80211_register_hw
 *
 */
extern int iwl4965_rate_control_register(void);
extern int iwl3945_rate_control_register(void);

/**
 * iwl_rate_control_unregister - Unregister the rate control callbacks
 *
 * This should be called after calling ieee80211_unregister_hw, but before
 * the driver is unloaded.
 */
extern void iwl4965_rate_control_unregister(void);
extern void iwl3945_rate_control_unregister(void);

#endif /* __iwl_legacy_rs_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644
index 000000000000..903ef0d6d6cb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.c
@@ -0,0 +1,165 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-power.h"
44
45/*
46 * Setting power level allows the card to go to sleep when not busy.
47 *
48 * We calculate a sleep command based on the required latency, which
49 * we get from mac80211. In order to handle thermal throttling, we can
50 * also use pre-defined power levels.
51 */
52
53/*
54 * This defines the old power levels. They are still used by default
55 * (level 1) and for thermal throttle (levels 3 through 5)
56 */
57
/*
 * One entry of a pre-defined power-level table: the sleep command for
 * that level plus the number of DTIM beacon intervals to skip.
 */
struct iwl_power_vec_entry {
	struct iwl_powertable_cmd cmd;
	u8 no_dtim;	/* number of skip dtim */
};
62
/*
 * Build the CAM (continuously aware mode) power command: zeroed flags,
 * so the device never sleeps.  Only the PCI power-management flag is
 * carried over from the current configuration.
 */
static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
				    struct iwl_powertable_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	if (priv->power_data.pci_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;

	IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
}
73
/*
 * Log the contents of @cmd and send it to the device as a
 * POWER_TABLE_CMD.  Returns the result of the command submission.
 */
static int
iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
{
	IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
	IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
	IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
					le32_to_cpu(cmd->tx_data_timeout));
	IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
					le32_to_cpu(cmd->rx_data_timeout));
	IWL_DEBUG_POWER(priv,
			"Sleep interval vector = { %d , %d , %d , %d , %d }\n",
			le32_to_cpu(cmd->sleep_interval[0]),
			le32_to_cpu(cmd->sleep_interval[1]),
			le32_to_cpu(cmd->sleep_interval[2]),
			le32_to_cpu(cmd->sleep_interval[3]),
			le32_to_cpu(cmd->sleep_interval[4]));

	return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
				sizeof(struct iwl_powertable_cmd), cmd);
}
94
/*
 * iwl_legacy_power_set_mode - program a new power table into the device
 *
 * Skips sending when @cmd equals the command already in effect (unless
 * @force).  While a scan is running the command is only stored in
 * sleep_cmd_next for the scan-complete path to apply later.  On success,
 * the RX chain flags are refreshed unless chain-noise calibration is
 * still in progress.  Caller must hold priv->mutex.
 *
 * Returns 0 on success or deferral, -EIO when RF is not ready, or the
 * command-submission error.
 */
int
iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
		       bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&priv->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
			priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;

	/* device already runs this exact command — nothing to do */
	if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!iwl_legacy_is_ready_rf(priv))
		return -EIO;

	/* scan complete use sleep_power_next, need to be updated */
	memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
		IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
		return 0;
	}

	/* set PMI before sending; cleared below if sleep is not allowed */
	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(STATUS_POWER_PMI, &priv->status);

	ret = iwl_legacy_set_power(priv, cmd);
	if (!ret) {
		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(STATUS_POWER_PMI, &priv->status);

		if (priv->cfg->ops->lib->update_chain_flags && update_chains)
			priv->cfg->ops->lib->update_chain_flags(priv);
		else if (priv->cfg->ops->lib->update_chain_flags)
			IWL_DEBUG_POWER(priv,
					"Cannot update the power, chain noise "
					"calibration running: %d\n",
					priv->chain_noise_data.state);

		/* record what the device is now running */
		memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IWL_ERR(priv, "set power fail, ret = %d", ret);

	return ret;
}
143
144int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
145{
146 struct iwl_powertable_cmd cmd;
147
148 iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
149 return iwl_legacy_power_set_mode(priv, &cmd, force);
150}
151EXPORT_SYMBOL(iwl_legacy_power_update_mode);
152
153/* initialize to default */
154void iwl_legacy_power_initialize(struct iwl_priv *priv)
155{
156 u16 lctl = iwl_legacy_pcie_link_ctl(priv);
157
158 priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
159
160 priv->power_data.debug_sleep_level_override = -1;
161
162 memset(&priv->power_data.sleep_cmd, 0,
163 sizeof(priv->power_data.sleep_cmd));
164}
165EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644
index 000000000000..d30b36acdc4a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.h
@@ -0,0 +1,55 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
#ifndef __iwl_legacy_power_setting_h__
#define __iwl_legacy_power_setting_h__

#include "iwl-commands.h"

/* Pre-defined power-save levels (per iwl-power.c, levels 3-5 are also
 * used for thermal throttling) */
enum iwl_power_level {
	IWL_POWER_INDEX_1,
	IWL_POWER_INDEX_2,
	IWL_POWER_INDEX_3,
	IWL_POWER_INDEX_4,
	IWL_POWER_INDEX_5,
	IWL_POWER_NUM
};

/* Per-device power-management state */
struct iwl_power_mgr {
	struct iwl_powertable_cmd sleep_cmd;	  /* command now in effect */
	struct iwl_powertable_cmd sleep_cmd_next; /* deferred during scan */
	int debug_sleep_level_override;	/* -1 = no override set */
	bool pci_pm;	/* PCI power management usable (L0s not enabled) */
};

int
iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
		       bool force);
int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
void iwl_legacy_power_initialize(struct iwl_priv *priv);

#endif  /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644
index 000000000000..30a493003ab0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-prph.h
@@ -0,0 +1,523 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_prph_h__
64#define __iwl_legacy_prph_h__
65
/*
 * Registers in this file are internal, not PCI bus memory mapped.
 * Driver accesses these via HBUS_TARG_PRPH_* registers.
 */
#define PRPH_BASE	(0x00000)
#define PRPH_END	(0xFFFFF)

/* APMG (power management) constants */
#define APMG_BASE			(PRPH_BASE + 0x3000)
#define APMG_CLK_CTRL_REG		(APMG_BASE + 0x0000)
#define APMG_CLK_EN_REG			(APMG_BASE + 0x0004)
#define APMG_CLK_DIS_REG		(APMG_BASE + 0x0008)
#define APMG_PS_CTRL_REG		(APMG_BASE + 0x000c)
#define APMG_PCIDEV_STT_REG		(APMG_BASE + 0x0010)
#define APMG_RFKILL_REG			(APMG_BASE + 0x0014)
#define APMG_RTC_INT_STT_REG		(APMG_BASE + 0x001c)
#define APMG_RTC_INT_MSK_REG		(APMG_BASE + 0x0020)
#define APMG_DIGITAL_SVR_REG		(APMG_BASE + 0x0058)
#define APMG_ANALOG_SVR_REG		(APMG_BASE + 0x006C)

/* clock request/mode bits for APMG_CLK_* registers */
/* NOTE(review): "APMS" appears to be a typo for "APMG"; name kept
 * as-is since other files may reference it */
#define APMS_CLK_VAL_MRB_FUNC_MODE	(0x00000001)
#define APMG_CLK_VAL_DMA_CLK_RQT	(0x00000200)
#define APMG_CLK_VAL_BSM_CLK_RQT	(0x00000800)

/* power-supply control bits for APMG_PS_CTRL_REG */
#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS	(0x00400000)
#define APMG_PS_CTRL_VAL_RESET_REQ		(0x04000000)
#define APMG_PS_CTRL_MSK_PWR_SRC		(0x03000000)
#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN		(0x00000000)
#define APMG_PS_CTRL_VAL_PWR_SRC_MAX		(0x01000000) /* 3945 only */
#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX		(0x02000000)
#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK	(0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32		(0x00000060)

#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS		(0x00000800)
100
101/**
102 * BSM (Bootstrap State Machine)
103 *
104 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
105 * in special SRAM that does not power down when the embedded control
106 * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
107 *
108 * When powering back up after sleeps (or during initial uCode load), the BSM
109 * internally loads the short bootstrap program from the special SRAM into the
110 * embedded processor's instruction SRAM, and starts the processor so it runs
111 * the bootstrap program.
112 *
113 * This bootstrap program loads (via PCI busmaster DMA) instructions and data
114 * images for a uCode program from host DRAM locations. The host driver
115 * indicates DRAM locations and sizes for instruction and data images via the
116 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
117 * the new program starts automatically.
118 *
119 * The uCode used for open-source drivers includes two programs:
120 *
121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver.
127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it
130 * is ready to be used.
131 *
132 * When initializing the NIC, the host driver does the following procedure:
133 *
134 * 1) Load bootstrap program (instructions only, no data image for bootstrap)
135 * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
136 *
137 * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
138 * images in host DRAM.
139 *
140 * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
141 * BSM_WR_MEM_SRC_REG = 0
142 * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
143 * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image
144 *
145 * 4) Load bootstrap into instruction SRAM:
146 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
147 *
148 * 5) Wait for load completion:
149 * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
150 *
151 * 6) Enable future boot loads whenever NIC's power management triggers it:
152 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
153 *
154 * 7) Start the NIC by removing all reset bits:
155 * CSR_RESET = 0
156 *
157 * The bootstrap uCode (already in instruction SRAM) loads initialization
158 * uCode. Initialization uCode performs data initialization, sends
159 * "initialize alive" notification to host, and waits for a signal from
160 * host to load runtime code.
161 *
162 * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
163 * images in host DRAM. The last register loaded must be the instruction
164 * byte count register ("1" in MSbit tells initialization uCode to load
165 * the runtime uCode):
166 * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
167 *
168 * 5) Wait for "alive" notification, then issue normal runtime commands.
169 *
170 * Data caching during power-downs:
171 *
172 * Just before the embedded controller powers down (e.g for automatic
173 * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
174 * a current snapshot of the embedded processor's data SRAM into host DRAM.
175 * This caches the data while the embedded processor's memory is powered down.
176 * Location and size are controlled by BSM_DRAM_DATA_* registers.
177 *
178 * NOTE: Instruction SRAM does not need to be saved, since that doesn't
179 * change during operation; the original image (from uCode distribution
180 * file) can be used for reload.
181 *
182 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
183 * at the BSM_DRAM_* registers, which now point to the runtime instruction
184 * image and the cached (modified) runtime data (*not* the initialization
185 * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
186 * uCode from where it left off before the power-down.
187 *
188 * NOTE: Initialization uCode does *not* run as part of the save/restore
189 * procedure.
190 *
191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory).
195 *
196 * Note that, during normal operation, the host DRAM that held the initial
197 * startup data for the runtime code is now being used as a backup data cache
198 * for modified data! If you need to completely re-initialize the NIC, make
199 * sure that you use the runtime data image from the uCode distribution file,
200 * not the modified/saved runtime data. You may want to store a separate
201 * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
202 */
203
204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208
209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800)
212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218
219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
221 * NOTE: 3945 pointers use bits 31:0 of DRAM address.
222 * 4965 pointers use bits 35:4 of DRAM address.
223 */
224#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
225#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
226#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
227#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
228
229/*
230 * BSM special memory, stays powered on during power-save sleeps.
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */
235
236
237/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
239#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
240#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
241#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
242#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
243#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
244#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
245#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
246
247/**
248 * Tx Scheduler
249 *
250 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
251 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
252 * host DRAM. It steers each frame's Tx command (which contains the frame
253 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues.
256 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
259 *
260 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority
262 * 2 -- EDCA VI (video) frames, higher priority
263 * 3 -- EDCA VO (voice) and management frames, highest priority
264 * 4 -- Commands (e.g. RXON, etc.)
265 * 5 -- unused (HCCA)
266 * 6 -- unused (HCCA)
267 * 7 -- not used by driver (device-internal only)
268 *
269 *
270 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
271 * In addition, driver can map the remaining queues to Tx DMA/FIFO
272 * channels 0-3 to support 11n aggregation via EDCA DMA channels.
273 *
274 * The driver sets up each queue to work in one of two modes:
275 *
276 * 1) Scheduler-Ack, in which the scheduler automatically supports a
277 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
278 * contains TFDs for a unique combination of Recipient Address (RA)
279 * and Traffic Identifier (TID), that is, traffic of a given
280 * Quality-Of-Service (QOS) priority, destined for a single station.
281 *
282 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
283 * each frame within the BA window, including whether it's been transmitted,
284 * and whether it's been acknowledged by the receiving station. The device
285 * automatically processes block-acks received from the receiving STA,
286 * and reschedules un-acked frames to be retransmitted (successful
287 * Tx completion may end up being out-of-order).
288 *
289 * The driver must maintain the queue's Byte Count table in host DRAM
290 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
291 * This mode does not support fragmentation.
292 *
293 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
294 * The device may automatically retry Tx, but will retry only one frame
295 * at a time, until receiving ACK from receiving station, or reaching
296 * retry limit and giving up.
297 *
298 * The command queue (#4/#9) must use this mode!
299 * This mode does not require use of the Byte Count table in host DRAM.
300 *
301 * Driver controls scheduler operation via 3 means:
302 * 1) Scheduler registers
 303 * 2) Shared scheduler database in internal 4965 SRAM
304 * 3) Shared data in host DRAM
305 *
306 * Initialization:
307 *
308 * When loading, driver should allocate memory for:
309 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
310 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
311 * (1024 bytes for each queue).
312 *
313 * After receiving "Alive" response from uCode, driver must initialize
314 * the scheduler (especially for queue #4/#9, the command queue, otherwise
315 * the driver can't issue commands!):
316 */
317
318/**
319 * Max Tx window size is the max number of contiguous TFDs that the scheduler
320 * can keep track of at one time when creating block-ack chains of frames.
321 * Note that "64" matches the number of ack bits in a block-ack packet.
322 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
323 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
324 */
325#define SCD_WIN_SIZE 64
326#define SCD_FRAME_LIMIT 64
327
328/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
329#define IWL49_SCD_START_OFFSET 0xa02c00
330
331/*
332 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
333 * Value is valid only after "Alive" response from uCode.
334 */
335#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
336
337/*
338 * Driver may need to update queue-empty bits after changing queue's
339 * write and read pointers (indexes) during (re-)initialization (i.e. when
340 * scheduler is not tracking what's happening).
341 * Bit fields:
342 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
343 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
344 * NOTE: This register is not used by Linux driver.
345 */
346#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
347
348/*
349 * Physical base address of array of byte count (BC) circular buffers (CBs).
350 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
351 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
352 * Others are spaced by 1024 bytes.
 353 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
354 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
355 * Bit fields:
356 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
357 */
358#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
359
360/*
361 * Enables any/all Tx DMA/FIFO channels.
362 * Scheduler generates requests for only the active channels.
363 * Set this to 0xff to enable all 8 channels (normal usage).
364 * Bit fields:
365 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
366 */
367#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
368/*
369 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
370 * Initialized and updated by driver as new TFDs are added to queue.
371 * NOTE: If using Block Ack, index must correspond to frame's
372 * Start Sequence Number; index = (SSN & 0xff)
373 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
374 */
375#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
376
377/*
378 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
379 * For FIFO mode, index indicates next frame to transmit.
380 * For Scheduler-ACK mode, index indicates first frame in Tx window.
381 * Initialized by driver, updated by scheduler.
382 */
383#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
384
385/*
386 * Select which queues work in chain mode (1) vs. not (0).
387 * Use chain mode to build chains of aggregated frames.
388 * Bit fields:
389 * 31-16: Reserved
390 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
 391 * NOTE: If driver sets up a queue for chain mode, it should also set up
 392 * Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
393 */
394#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
395
396/*
397 * Select which queues interrupt driver when scheduler increments
398 * a queue's read pointer (index).
399 * Bit fields:
400 * 31-16: Reserved
401 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
402 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
403 * from Rx queue to read Tx command responses and update Tx queues.
404 */
405#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
406
407/*
408 * Queue search status registers. One for each queue.
409 * Sets up queue mode and assigns queue to Tx DMA channel.
410 * Bit fields:
411 * 19-10: Write mask/enable bits for bits 0-9
412 * 9: Driver should init to "0"
413 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
414 * Driver should init to "1" for aggregation mode, or "0" otherwise.
415 * 7-6: Driver should init to "0"
416 * 5: Window Size Left; indicates whether scheduler can request
417 * another TFD, based on window size, etc. Driver should init
418 * this bit to "1" for aggregation mode, or "0" for non-agg.
419 * 4-1: Tx FIFO to use (range 0-7).
420 * 0: Queue is active (1), not active (0).
421 * Other bits should be written as "0"
422 *
423 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
424 * via SCD_QUEUECHAIN_SEL.
425 */
426#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
427 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
428
429/* Bit field positions */
430#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
431#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
432#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
433#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
434
435/* Write masks */
436#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
437#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
438
439/**
440 * 4965 internal SRAM structures for scheduler, shared with driver ...
441 *
442 * Driver should clear and initialize the following areas after receiving
443 * "Alive" response from 4965 uCode, i.e. after initial
444 * uCode load, or after a uCode load done for error recovery:
445 *
446 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
447 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
448 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
449 *
450 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
451 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
452 * All OFFSET values must be added to this base address.
453 */
454
455/*
456 * Queue context. One 8-byte entry for each of 16 queues.
457 *
458 * Driver should clear this entire area (size 0x80) to 0 after receiving
459 * "Alive" notification from uCode. Additionally, driver should init
460 * each queue's entry as follows:
461 *
462 * LS Dword bit fields:
463 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
464 *
465 * MS Dword bit fields:
466 * 16-22: Frame limit. Driver should init to 10 (0xa).
467 *
468 * Driver should init all other bits to 0.
469 *
470 * Init must be done after driver receives "Alive" response from 4965 uCode,
471 * and when setting up queue for aggregation.
472 */
473#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
474#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
475 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
476
477#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
478#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
479#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
480#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
481
482/*
483 * Tx Status Bitmap
484 *
485 * Driver should clear this entire area (size 0x100) to 0 after receiving
486 * "Alive" notification from uCode. Area is used only by device itself;
487 * no other support (besides clearing) is required from driver.
488 */
489#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
490
491/*
492 * RAxTID to queue translation mapping.
493 *
 494 * When queue is in Scheduler-ACK mode, frames placed in that queue must be
495 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
496 * one QOS priority level destined for one station (for this wireless link,
497 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
498 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
499 * mode, the device ignores the mapping value.
500 *
501 * Bit fields, for each 16-bit map:
502 * 15-9: Reserved, set to 0
503 * 8-4: Index into device's station table for recipient station
504 * 3-0: Traffic ID (tid), range 0-15
505 *
506 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
507 * "Alive" notification from uCode. To update a 16-bit map value, driver
508 * must read a dword-aligned value from device SRAM, replace the 16-bit map
509 * value of interest, and write the dword value back into device SRAM.
510 */
511#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
512
513/* Find translation table dword to read/write for given queue */
514#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
515 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
516
517#define IWL_SCD_TXFIFO_POS_TID (0)
518#define IWL_SCD_TXFIFO_POS_RA (4)
519#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
520
521/*********************** END TX SCHEDULER *************************************/
522
523#endif /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644
index 000000000000..654cf233a384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-rx.c
@@ -0,0 +1,302 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <net/mac80211.h>
33#include <asm/unaligned.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * position preceding WRITE is the last slot the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_legacy_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
106
107/**
108 * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
122
123/**
124 * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
125 */
126void
127iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
128 struct iwl_rx_queue *q)
129{
130 unsigned long flags;
131 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
132 u32 reg;
133
134 spin_lock_irqsave(&q->lock, flags);
135
136 if (q->need_update == 0)
137 goto exit_unlock;
138
139 /* If power-saving is in use, make sure device is awake */
140 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
141 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
142
143 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
144 IWL_DEBUG_INFO(priv,
145 "Rx queue requesting wakeup,"
146 " GP1 = 0x%x\n", reg);
147 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
148 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
149 goto exit_unlock;
150 }
151
152 q->write_actual = (q->write & ~0x7);
153 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
154 q->write_actual);
155
156 /* Else device is assumed to be awake */
157 } else {
158 /* Device expects a multiple of 8 */
159 q->write_actual = (q->write & ~0x7);
160 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
161 q->write_actual);
162 }
163
164 q->need_update = 0;
165
166 exit_unlock:
167 spin_unlock_irqrestore(&q->lock, flags);
168}
169EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
170
171int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
172{
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 struct device *dev = &priv->pci_dev->dev;
175 int i;
176
177 spin_lock_init(&rxq->lock);
178 INIT_LIST_HEAD(&rxq->rx_free);
179 INIT_LIST_HEAD(&rxq->rx_used);
180
181 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
182 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
183 GFP_KERNEL);
184 if (!rxq->bd)
185 goto err_bd;
186
187 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
188 &rxq->rb_stts_dma, GFP_KERNEL);
189 if (!rxq->rb_stts)
190 goto err_rb;
191
192 /* Fill the rx_used queue with _all_ of the Rx buffers */
193 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
194 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
195
196 /* Set us so that we have processed and used all buffers, but have
197 * not restocked the Rx queue with fresh buffers */
198 rxq->read = rxq->write = 0;
199 rxq->write_actual = 0;
200 rxq->free_count = 0;
201 rxq->need_update = 0;
202 return 0;
203
204err_rb:
205 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
206 rxq->bd_dma);
207err_bd:
208 return -ENOMEM;
209}
210EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
211
212
213void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb)
215{
216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
217 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
218
219 if (!report->state) {
220 IWL_DEBUG_11H(priv,
221 "Spectrum Measure Notification: Start\n");
222 return;
223 }
224
225 memcpy(&priv->measure_report, report, sizeof(*report));
226 priv->measurement_status |= MEASUREMENT_READY;
227}
228EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
229
230void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
231 struct iwl_rx_packet *pkt)
232{
233 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
234 return;
235 if (iwl_legacy_is_any_associated(priv)) {
236 if (priv->cfg->ops->lib->check_plcp_health) {
237 if (!priv->cfg->ops->lib->check_plcp_health(
238 priv, pkt)) {
239 /*
240 * high plcp error detected
241 * reset Radio
242 */
243 iwl_legacy_force_reset(priv,
244 IWL_RF_RESET, false);
245 }
246 }
247 }
248}
249EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
250
251/*
252 * returns non-zero if packet should be dropped
253 */
254int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
255 struct ieee80211_hdr *hdr,
256 u32 decrypt_res,
257 struct ieee80211_rx_status *stats)
258{
259 u16 fc = le16_to_cpu(hdr->frame_control);
260
261 /*
262 * All contexts have the same setting here due to it being
263 * a module parameter, so OK to check any context.
264 */
265 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
266 RXON_FILTER_DIS_DECRYPT_MSK)
267 return 0;
268
269 if (!(fc & IEEE80211_FCTL_PROTECTED))
270 return 0;
271
272 IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
273 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
274 case RX_RES_STATUS_SEC_TYPE_TKIP:
275 /* The uCode has got a bad phase 1 Key, pushes the packet.
276 * Decryption will be done in SW. */
277 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
278 RX_RES_STATUS_BAD_KEY_TTAK)
279 break;
280
281 case RX_RES_STATUS_SEC_TYPE_WEP:
282 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
283 RX_RES_STATUS_BAD_ICV_MIC) {
284 /* bad ICV, the packet is destroyed since the
285 * decryption is inplace, drop it */
286 IWL_DEBUG_RX(priv, "Packet destroyed\n");
287 return -1;
288 }
289 case RX_RES_STATUS_SEC_TYPE_CCMP:
290 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
291 RX_RES_STATUS_DECRYPT_OK) {
292 IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
293 stats->flag |= RX_FLAG_DECRYPTED;
294 }
295 break;
296
297 default:
298 break;
299 }
300 return 0;
301}
302EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644
index 000000000000..60f597f796ca
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -0,0 +1,625 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
40/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
41 * sending probe req. This should be set long enough to hear probe responses
42 * from more than one AP. */
43#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
44#define IWL_ACTIVE_DWELL_TIME_52 (20)
45
46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
48
49/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
50 * Must be set longer than active dwell time.
51 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
52#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
53#define IWL_PASSIVE_DWELL_TIME_52 (10)
54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5
56
57static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
58{
59 int ret;
60 struct iwl_rx_packet *pkt;
61 struct iwl_host_cmd cmd = {
62 .id = REPLY_SCAN_ABORT_CMD,
63 .flags = CMD_WANT_SKB,
64 };
65
66 /* Exit instantly with error when device is not ready
67 * to receive scan abort command or it does not perform
68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status) ||
73 test_bit(STATUS_EXIT_PENDING, &priv->status))
74 return -EIO;
75
76 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
77 if (ret)
78 return ret;
79
80 pkt = (struct iwl_rx_packet *)cmd.reply_page;
81 if (pkt->u.status != CAN_ABORT_STATUS) {
82 /* The scan abort will return 1 for success or
83 * 2 for "failure". A failure condition can be
84 * due to simply not being in an active scan which
 85 * can occur if we send the scan abort before
 86 * the microcode has notified us that a scan is
87 * completed. */
88 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
89 ret = -EIO;
90 }
91
92 iwl_legacy_free_pages(priv, cmd.reply_page);
93 return ret;
94}
95
96static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
97{
98 /* check if scan was requested from mac80211 */
99 if (priv->scan_request) {
100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
102 }
103
104 priv->is_internal_short_scan = false;
105 priv->scan_vif = NULL;
106 priv->scan_request = NULL;
107}
108
109void iwl_legacy_force_scan_end(struct iwl_priv *priv)
110{
111 lockdep_assert_held(&priv->mutex);
112
113 if (!test_bit(STATUS_SCANNING, &priv->status)) {
114 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
115 return;
116 }
117
118 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
119 clear_bit(STATUS_SCANNING, &priv->status);
120 clear_bit(STATUS_SCAN_HW, &priv->status);
121 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
122 iwl_legacy_complete_scan(priv, true);
123}
124
125static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
126{
127 int ret;
128
129 lockdep_assert_held(&priv->mutex);
130
131 if (!test_bit(STATUS_SCANNING, &priv->status)) {
132 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
133 return;
134 }
135
136 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
137 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
138 return;
139 }
140
141 ret = iwl_legacy_send_scan_abort(priv);
142 if (ret) {
143 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
144 iwl_legacy_force_scan_end(priv);
145 } else
146 IWL_DEBUG_SCAN(priv, "Sucessfully send scan abort\n");
147}
148
149/**
150 * iwl_scan_cancel - Cancel any currently executing HW scan
151 */
152int iwl_legacy_scan_cancel(struct iwl_priv *priv)
153{
154 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
155 queue_work(priv->workqueue, &priv->abort_scan);
156 return 0;
157}
158EXPORT_SYMBOL(iwl_legacy_scan_cancel);
159
160/**
161 * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
162 * @ms: amount of time to wait (in milliseconds) for scan to abort
163 *
164 */
165int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
166{
167 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
168
169 lockdep_assert_held(&priv->mutex);
170
171 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
172
173 iwl_legacy_do_scan_abort(priv);
174
175 while (time_before_eq(jiffies, timeout)) {
176 if (!test_bit(STATUS_SCAN_HW, &priv->status))
177 break;
178 msleep(20);
179 }
180
181 return test_bit(STATUS_SCAN_HW, &priv->status);
182}
183EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
184
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanreq_notification *notif =
	    (struct iwl_scanreq_notification *)pkt->u.raw;

	/* Debug-only: the reply carries nothing the driver acts on */
	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
}
197
/* Service SCAN_START_NOTIFICATION (0x82) */
static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanstart_notification *notif =
	    (struct iwl_scanstart_notification *)pkt->u.raw;
	/* Capture scan start TSF (low word) so the results handler can
	 * report elapsed time per channel */
	priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	IWL_DEBUG_SCAN(priv, "Scan start: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       notif->status, notif->beacon_timer);
}
215
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanresults_notification *notif =
	    (struct iwl_scanresults_notification *)pkt->u.raw;

	/* Debug-only: elapsed is measured against scan_start_tsf captured
	 * by the scan-start handler */
	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d "
		       "elapsed=%lu usec\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       le32_to_cpu(notif->statistics[0]),
		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
}
237
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb)
{

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	/* NOTE(review): scan_notif only exists under the ifdef above; this
	 * compiles only because IWL_DEBUG_SCAN presumably expands to a no-op
	 * that does not evaluate its arguments when the debug config is
	 * unset — confirm against iwl-debug.h */
	IWL_DEBUG_SCAN(priv,
			"Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
		       scan_notif->scanned_channels,
		       scan_notif->tsf_low,
		       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(STATUS_SCAN_HW, &priv->status);

	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
		       (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
		       jiffies_to_msecs(jiffies - priv->scan_start));

	/* Finish up in sleepable work context */
	queue_work(priv->workqueue, &priv->scan_completed);
}
263
264void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
265{
266 /* scan handlers */
267 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
268 priv->rx_handlers[SCAN_START_NOTIFICATION] =
269 iwl_legacy_rx_scan_start_notif;
270 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
271 iwl_legacy_rx_scan_results_notif;
272 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
273 iwl_legacy_rx_scan_complete_notif;
274}
275EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
276
277inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
278 enum ieee80211_band band,
279 u8 n_probes)
280{
281 if (band == IEEE80211_BAND_5GHZ)
282 return IWL_ACTIVE_DWELL_TIME_52 +
283 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
284 else
285 return IWL_ACTIVE_DWELL_TIME_24 +
286 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
287}
288EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
289
/*
 * iwl_legacy_get_passive_dwell_time - passive dwell time for @band
 *
 * Starts from a per-band default; while associated, the dwell is
 * clamped to 98% of the smallest active beacon interval minus twice
 * the channel tune time.
 */
u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
			       enum ieee80211_band band,
			       struct ieee80211_vif *vif)
{
	struct iwl_rxon_context *ctx;
	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;

	if (iwl_legacy_is_any_associated(priv)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		for_each_context(priv, ctx) {
			u16 value;

			if (!iwl_legacy_is_associated_ctx(ctx))
				continue;
			/* Missing or oversized beacon interval: use base */
			value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
			if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
				value = IWL_PASSIVE_DWELL_BASE;
			value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
			passive = min(value, passive);
		}
	}

	return passive;
}
EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
321
322void iwl_legacy_init_scan_params(struct iwl_priv *priv)
323{
324 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
325 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
326 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
327 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
328 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
329}
330EXPORT_SYMBOL(iwl_legacy_init_scan_params);
331
/*
 * iwl_legacy_scan_initiate - start a HW scan (mac80211 or internal)
 *
 * Sets STATUS_SCANNING, calls the per-device request_scan op and arms
 * the scan watchdog.  Caller must hold priv->mutex.  Returns 0 on
 * success or a negative errno; on failure all state set here is
 * rolled back.
 */
static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
					  struct ieee80211_vif *vif,
					  bool internal,
					  enum ieee80211_band band)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (WARN_ON(!priv->cfg->ops->utils->request_scan))
		return -EOPNOTSUPP;

	/* Stop a stale watchdog; re-armed below on success */
	cancel_delayed_work(&priv->scan_check);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_SCAN(priv,
			"Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
		return -EBUSY;
	}

	IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
			internal ? "internal short " : "");

	set_bit(STATUS_SCANNING, &priv->status);
	priv->is_internal_short_scan = internal;
	priv->scan_start = jiffies;
	priv->scan_band = band;

	ret = priv->cfg->ops->utils->request_scan(priv, vif);
	if (ret) {
		/* Roll back the scan state set above */
		clear_bit(STATUS_SCANNING, &priv->status);
		priv->is_internal_short_scan = false;
		return ret;
	}

	/* Watchdog: forces scan end if firmware never reports completion */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	return 0;
}
382
/*
 * iwl_legacy_mac_hw_scan - mac80211 hw_scan callback
 *
 * Records the request and vif, then starts the HW scan.  If an internal
 * short scan is running, the request is only stored; the completion
 * handler starts it once the internal scan finishes.
 */
int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
		    struct ieee80211_vif *vif,
		    struct cfg80211_scan_request *req)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (req->n_channels == 0)
		return -EINVAL;

	mutex_lock(&priv->mutex);

	/* Only an internal short scan may be preempted by mac80211 */
	if (test_bit(STATUS_SCANNING, &priv->status) &&
			!priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	priv->scan_request = req;
	priv->scan_vif = vif;

	/*
	 * If an internal scan is in progress, just set
	 * up the scan_request as per above.
	 */
	if (priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
		ret = 0;
	} else
		ret = iwl_legacy_scan_initiate(priv, vif, false,
					req->channels[0]->band);

	IWL_DEBUG_MAC80211(priv, "leave\n");

out_unlock:
	mutex_unlock(&priv->mutex);

	return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
427
/*
 * internal short scan, this function should only been called while associated.
 * It will reset and tune the radio to prevent possible RF related problem
 */
void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
{
	/* Defer to process context; the work handler takes priv->mutex */
	queue_work(priv->workqueue, &priv->start_internal_scan);
}
436
437static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
438{
439 struct iwl_priv *priv =
440 container_of(work, struct iwl_priv, start_internal_scan);
441
442 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
443
444 mutex_lock(&priv->mutex);
445
446 if (priv->is_internal_short_scan == true) {
447 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
448 goto unlock;
449 }
450
451 if (test_bit(STATUS_SCANNING, &priv->status)) {
452 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
453 goto unlock;
454 }
455
456 if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
457 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
458 unlock:
459 mutex_unlock(&priv->mutex);
460}
461
/*
 * iwl_legacy_bg_scan_check - scan watchdog work handler
 *
 * Armed by iwl_legacy_scan_initiate(); if it ever runs, the firmware
 * missed its completion notification, so force the scan to end.
 */
static void iwl_legacy_bg_scan_check(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, scan_check.work);

	IWL_DEBUG_SCAN(priv, "Scan check work\n");

	/* Since we are here firmware does not finish scan and
	 * most likely is in bad shape, so we don't bother to
	 * send abort command, just force scan complete to mac80211 */
	mutex_lock(&priv->mutex);
	iwl_legacy_force_scan_end(priv);
	mutex_unlock(&priv->mutex);
}
476
/**
 * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
 * @frame: caller-provided frame buffer
 * @ta: transmitter (source) address
 * @ies: optional extra IEs appended after the wildcard SSID IE
 * @ie_len: length of @ies in bytes
 * @left: space available in @frame, in bytes
 *
 * Returns the number of bytes written, or 0 when @left cannot hold the
 * 24-byte header plus the mandatory (empty) SSID IE.
 */

u16
iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
		       const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	/* 24-byte management header with broadcast DA and BSSID */
	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
	memcpy(frame->sa, ta, ETH_ALEN);
	memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	/* zero-length (wildcard) SSID */
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	/* Caller asked for more IE bytes than fit; return what we have */
	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16)len;
}
EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
525
/*
 * iwl_legacy_bg_abort_scan - work handler for a requested scan abort
 *
 * Gives the firmware up to 200 ms to abort under priv->mutex.
 */
static void iwl_legacy_bg_abort_scan(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);

	IWL_DEBUG_SCAN(priv, "Abort scan work\n");

	/* We keep scan_check work queued in case when firmware will not
	 * report back scan completed notification */
	mutex_lock(&priv->mutex);
	iwl_legacy_scan_cancel_timeout(priv, 200);
	mutex_unlock(&priv->mutex);
}
538
/*
 * iwl_legacy_bg_scan_completed - work handler run when a scan finishes
 *
 * Completes the scan towards mac80211, starts a pending mac80211 scan
 * that arrived during an internal scan, and re-applies the power and
 * TX-power settings deferred while scanning.
 */
static void iwl_legacy_bg_scan_completed(struct work_struct *work)
{
	struct iwl_priv *priv =
	    container_of(work, struct iwl_priv, scan_completed);
	bool aborted;

	IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
		       priv->is_internal_short_scan ? "internal short " : "");

	/* Scan finished: the watchdog is no longer needed */
	cancel_delayed_work(&priv->scan_check);

	mutex_lock(&priv->mutex);

	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	if (aborted)
		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");

	/* Someone (e.g. force_scan_end) already completed this scan */
	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
		goto out_settings;
	}

	if (priv->is_internal_short_scan && !aborted) {
		int err;

		/* Check if mac80211 requested scan during our internal scan */
		if (priv->scan_request == NULL)
			goto out_complete;

		/* If so request a new scan */
		err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
					priv->scan_request->channels[0]->band);
		if (err) {
			IWL_DEBUG_SCAN(priv,
				"failed to initiate pending scan: %d\n", err);
			aborted = true;
			goto out_complete;
		}

		/* New scan started; defer completion/settings to it */
		goto out;
	}

out_complete:
	iwl_legacy_complete_scan(priv, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!iwl_legacy_is_ready_rf(priv))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
				false);
	iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);

	priv->cfg->ops->utils->post_scan(priv);

out:
	mutex_unlock(&priv->mutex);
}
602
603void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
604{
605 INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
606 INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
607 INIT_WORK(&priv->start_internal_scan,
608 iwl_legacy_bg_start_internal_scan);
609 INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
610}
611EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
612
/*
 * iwl_legacy_cancel_scan_deferred_work - synchronously stop all scan work
 *
 * Cancels every scan work item.  If the watchdog was still pending, a
 * scan may be in flight with nothing left to complete it, so force the
 * scan to end under priv->mutex.
 */
void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->start_internal_scan);
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->scan_completed);

	if (cancel_delayed_work_sync(&priv->scan_check)) {
		mutex_lock(&priv->mutex);
		iwl_legacy_force_scan_end(priv);
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
new file mode 100644
index 000000000000..9f70a4723103
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -0,0 +1,92 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
#ifndef __iwl_legacy_spectrum_h__
#define __iwl_legacy_spectrum_h__
/* Bit flags for ieee80211_basic_report.map */
enum { /* ieee80211_basic_report.map */
	IEEE80211_BASIC_MAP_BSS = (1 << 0),
	IEEE80211_BASIC_MAP_OFDM = (1 << 1),
	IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
	IEEE80211_BASIC_MAP_RADAR = (1 << 3),
	IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
	/* Bits 5-7 are reserved */

};
/* Basic measurement report body; __packed because this is a wire format */
struct ieee80211_basic_report {
	u8 channel;
	__le64 start_time;
	__le16 duration;
	u8 map;
} __packed;
46
/* Bit flags for ieee80211_measurement_request.mode */
enum { /* ieee80211_measurement_request.mode */
	/* Bit 0 is reserved */
	IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
	IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
	IEEE80211_MEASUREMENT_REPORT = (1 << 3),
	/* Bits 4-7 are reserved */
};

/* Measurement report types; presumably used for the .type field of the
 * request/report structs below — verify against callers */
enum {
	IEEE80211_REPORT_BASIC = 0,	/* required */
	IEEE80211_REPORT_CCA = 1,	/* optional */
	IEEE80211_REPORT_RPI = 2,	/* optional */
	/* 3-255 reserved */
};

/* Channel and time window a measurement applies to (wire format) */
struct ieee80211_measurement_params {
	u8 channel;
	__le64 start_time;
	__le16 duration;
} __packed;
67
/* Generic 802.11 information element header: id, length, then payload */
struct ieee80211_info_element {
	u8 id;
	u8 len;
	u8 data[0];	/* zero-length array: payload follows in-line */
} __packed;

/* Measurement request element (wire format) */
struct ieee80211_measurement_request {
	struct ieee80211_info_element ie;
	u8 token;
	u8 mode;
	u8 type;
	struct ieee80211_measurement_params params[0];
} __packed;
81
/* Measurement report element (wire format); only the basic report
 * variant is declared in the union */
struct ieee80211_measurement_report {
	struct ieee80211_info_element ie;
	u8 token;
	u8 mode;
	u8 type;
	union {
		struct ieee80211_basic_report basic[0];
	} u;
} __packed;
91
92#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644
index 000000000000..47c9da3834ea
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -0,0 +1,816 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/lockdep.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38
/* priv->sta_lock must be held */
/* Mark station @sta_id as active in the uCode (driver bookkeeping only);
 * logs an error if the driver-side active bit was never set. */
static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{

	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
		IWL_ERR(priv,
			"ACTIVATE a non DRIVER active station id %u addr %pM\n",
			sta_id, priv->stations[sta_id].sta.sta.addr);

	if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
		IWL_DEBUG_ASSOC(priv,
				"STA id %u addr %pM already present"
				" in uCode (according to driver)\n",
				sta_id, priv->stations[sta_id].sta.sta.addr);
	} else {
		priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
		IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
				sta_id, priv->stations[sta_id].sta.sta.addr);
	}
}
59
/*
 * iwl_legacy_process_add_sta_resp - handle the uCode reply to REPLY_ADD_STA
 *
 * On ADD_STA_SUCCESS_MSK the station is marked uCode-active; every
 * other status is logged and treated as failure.  Returns 0 on success,
 * -EIO otherwise.
 */
static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
				    struct iwl_legacy_addsta_cmd *addsta,
				    struct iwl_rx_packet *pkt,
				    bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
			pkt->hdr.flags);
		return ret;
	}

	IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
		       sta_id);

	spin_lock_irqsave(&priv->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
		iwl_legacy_sta_ucode_activate(priv, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TABLE:
		IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
			sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IWL_ERR(priv,
			"Adding station %d failed, no block ack resource.\n",
			sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
			sta_id);
		break;
	default:
		IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
				pkt->u.add_sta.status);
		break;
	}

	IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
		       priv->stations[sta_id].sta.mode ==
		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
		       sta_id, priv->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
		       priv->stations[sta_id].sta.mode ==
		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
		       addsta->sta.addr);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
126
/* CMD_ASYNC completion callback for REPLY_ADD_STA */
static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
				 struct iwl_device_cmd *cmd,
				 struct iwl_rx_packet *pkt)
{
	struct iwl_legacy_addsta_cmd *addsta =
	    (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;

	/* sync=false: nobody waits on the result */
	iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);

}
137
138int iwl_legacy_send_add_sta(struct iwl_priv *priv,
139 struct iwl_legacy_addsta_cmd *sta, u8 flags)
140{
141 struct iwl_rx_packet *pkt = NULL;
142 int ret = 0;
143 u8 data[sizeof(*sta)];
144 struct iwl_host_cmd cmd = {
145 .id = REPLY_ADD_STA,
146 .flags = flags,
147 .data = data,
148 };
149 u8 sta_id __maybe_unused = sta->sta.sta_id;
150
151 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
152 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
153
154 if (flags & CMD_ASYNC)
155 cmd.callback = iwl_legacy_add_sta_callback;
156 else {
157 cmd.flags |= CMD_WANT_SKB;
158 might_sleep();
159 }
160
161 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
162 ret = iwl_legacy_send_cmd(priv, &cmd);
163
164 if (ret || (flags & CMD_ASYNC))
165 return ret;
166
167 if (ret == 0) {
168 pkt = (struct iwl_rx_packet *)cmd.reply_page;
169 ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
170 }
171 iwl_legacy_free_pages(priv, cmd.reply_page);
172
173 return ret;
174}
175EXPORT_SYMBOL(iwl_legacy_send_add_sta);
176
177static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
178 struct ieee80211_sta *sta,
179 struct iwl_rxon_context *ctx)
180{
181 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
182 __le32 sta_flags;
183 u8 mimo_ps_mode;
184
185 if (!sta || !sta_ht_inf->ht_supported)
186 goto done;
187
188 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
189 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
190 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
191 "static" :
192 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
193 "dynamic" : "disabled");
194
195 sta_flags = priv->stations[index].sta.station_flags;
196
197 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
198
199 switch (mimo_ps_mode) {
200 case WLAN_HT_CAP_SM_PS_STATIC:
201 sta_flags |= STA_FLG_MIMO_DIS_MSK;
202 break;
203 case WLAN_HT_CAP_SM_PS_DYNAMIC:
204 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
205 break;
206 case WLAN_HT_CAP_SM_PS_DISABLED:
207 break;
208 default:
209 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
210 break;
211 }
212
213 sta_flags |= cpu_to_le32(
214 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
215
216 sta_flags |= cpu_to_le32(
217 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
218
219 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
220 sta_flags |= STA_FLG_HT40_EN_MSK;
221 else
222 sta_flags &= ~STA_FLG_HT40_EN_MSK;
223
224 priv->stations[index].sta.station_flags = sta_flags;
225 done:
226 return;
227}
228
/**
 * iwl_legacy_prep_station - Prepare station information for addition
 *
 * Picks (or reuses) a station table slot for @addr and fills in the
 * REPLY_ADD_STA command to be sent later.  Returns the station id, or
 * IWL_INVALID_STATION when no slot is free.
 *
 * should be called with sta_lock held
 */
u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
{
	struct iwl_station_entry *station;
	int i;
	u8 sta_id = IWL_INVALID_STATION;
	u16 rate;

	/* AP and broadcast stations have fixed per-context slots */
	if (is_ap)
		sta_id = ctx->ap_sta_id;
	else if (is_broadcast_ether_addr(addr))
		sta_id = ctx->bcast_sta_id;
	else
		/* Reuse slot with matching address, else first free slot */
		for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
			if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				sta_id = i;
				break;
			}

			if (!priv->stations[i].used &&
			    sta_id == IWL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IWL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
		IWL_DEBUG_INFO(priv,
				"STA %d already in process of being added.\n",
				sta_id);
		return sta_id;
	}

	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
	    !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
		IWL_DEBUG_ASSOC(priv,
				"STA %d (%pM) already added, not adding again.\n",
				sta_id, addr);
		return sta_id;
	}

	station = &priv->stations[sta_id];
	station->used = IWL_STA_DRIVER_ACTIVE;
	IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
			sta_id, addr);
	priv->num_stations++;

	/* Set up the REPLY_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = ctx->station_flags;
	station->ctxid = ctx->ctxid;

	if (sta) {
		struct iwl_station_priv_common *sta_priv;

		sta_priv = (void *)sta->drv_priv;
		sta_priv->ctx = ctx;
	}

	/*
	 * OK to call unconditionally, since local stations (IBSS BSSID
	 * STA and broadcast STA) pass in a NULL sta, and mac80211
	 * doesn't allow HT IBSS.
	 */
	iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);

	/* 3945 only */
	rate = (priv->band == IEEE80211_BAND_5GHZ) ?
		IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;

}
EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
325
326#define STA_WAIT_TIMEOUT (HZ/2)
327
/**
 * iwl_legacy_add_station_common -
 *
 * Prepares a station entry under sta_lock, then synchronously sends
 * REPLY_ADD_STA to the device; on failure the driver-side flags are
 * rolled back.  Returns 0 and stores the station id in @sta_id_r on
 * success, negative errno otherwise.  Sleeps (CMD_SYNC).
 */
int
iwl_legacy_add_station_common(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			   const u8 *addr, bool is_ap,
			   struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct iwl_legacy_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
			addr);
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
		IWL_DEBUG_INFO(priv,
				"STA %d already in process of being added.\n",
				sta_id);
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
		IWL_DEBUG_ASSOC(priv,
			"STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return -EEXIST;
	}

	priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
	/* Snapshot the command under the lock; sending must not hold it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
	       sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_irqsave(&priv->sta_lock, flags_spin);
		IWL_ERR(priv, "Adding station %pM failed.\n",
			priv->stations[sta_id].sta.sta.addr);
		priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
		priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_add_station_common);
393
/**
 * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
 *
 * priv->sta_lock must be held
 */
static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
{
	/* Ucode must be active and driver must be non active */
	if ((priv->stations[sta_id].used &
	     (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
	      IWL_STA_UCODE_ACTIVE)
		IWL_ERR(priv, "removed non active STA %u\n", sta_id);

	priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;

	/* Wipe the whole entry so the slot can be reused */
	memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
	IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
}
412
/*
 * iwl_legacy_send_remove_station - send REPLY_REMOVE_STA synchronously
 *
 * On REM_STA_SUCCESS_MSK (and @temporary == false) the station is also
 * marked uCode-inactive in the driver table.  Returns 0 on success,
 * negative errno otherwise.
 */
static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
		   const u8 *addr, int sta_id,
		   bool temporary)
{
	struct iwl_rx_packet *pkt;
	int ret;

	unsigned long flags_spin;
	struct iwl_rem_sta_cmd rm_sta_cmd;

	struct iwl_host_cmd cmd = {
		.id = REPLY_REMOVE_STA,
		.len = sizeof(struct iwl_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	/* We need the reply packet to inspect the removal status */
	cmd.flags |= CMD_WANT_SKB;

	ret = iwl_legacy_send_cmd(priv, &cmd);

	if (ret)
		return ret;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
			pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&priv->sta_lock, flags_spin);
				iwl_legacy_sta_ucode_deactivate(priv, sta_id);
				spin_unlock_irqrestore(&priv->sta_lock,
								flags_spin);
			}
			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
			break;
		}
	}
	iwl_legacy_free_pages(priv, cmd.reply_page);

	return ret;
}
469
/**
 * iwl_legacy_remove_station - Remove driver's knowledge of station.
 *
 * Checks the station is active on both driver and uCode side, drops the
 * driver-side state (including any locally-owned link-quality data),
 * then asks the device to remove the station.
 */
int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
		       const u8 *addr)
{
	unsigned long flags;

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_INFO(priv,
			"Unable to remove station %pM, device not ready.\n",
			addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
			sta_id, addr);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&priv->sta_lock, flags);

	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
		IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
				addr);
		goto out_err;
	}

	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
		IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
				addr);
		goto out_err;
	}

	/* Free locally-owned link quality data, if any */
	if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
		kfree(priv->stations[sta_id].lq);
		priv->stations[sta_id].lq = NULL;
	}

	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;

	priv->num_stations--;

	BUG_ON(priv->num_stations < 0);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
529
530/**
531 * iwl_legacy_clear_ucode_stations - clear ucode station table bits
532 *
533 * This function clears all the bits in the driver indicating
534 * which stations are active in the ucode. Call when something
535 * other than explicit station management would cause this in
536 * the ucode, e.g. unassociated RXON.
537 */
538void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
539 struct iwl_rxon_context *ctx)
540{
541 int i;
542 unsigned long flags_spin;
543 bool cleared = false;
544
545 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
546
547 spin_lock_irqsave(&priv->sta_lock, flags_spin);
548 for (i = 0; i < priv->hw_params.max_stations; i++) {
549 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
550 continue;
551
552 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
553 IWL_DEBUG_INFO(priv,
554 "Clearing ucode active for station %d\n", i);
555 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
556 cleared = true;
557 }
558 }
559 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
560
561 if (!cleared)
562 IWL_DEBUG_INFO(priv,
563 "No active stations found to be cleared\n");
564}
565EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
566
/**
 * iwl_legacy_restore_stations() - Restore driver known stations to device
 *
 * All stations considered active by the driver, but not present in the
 * ucode, are restored.
 *
 * Function sleeps (sends synchronous host commands).
 */
void
iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_addsta_cmd sta_cmd;
	struct iwl_link_quality_cmd lq;
	unsigned long flags_spin;
	int i;
	bool found = false;
	int ret;
	bool send_lq;

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_INFO(priv,
			"Not ready yet, not restoring any stations.\n");
		return;
	}

	IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	/* Pass 1: mark every driver-active but ucode-inactive station of
	 * this context as "restore in progress". */
	for (i = 0; i < priv->hw_params.max_stations; i++) {
		if (ctx->ctxid != priv->stations[i].ctxid)
			continue;
		if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
		    !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
			IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
					priv->stations[i].sta.sta.addr);
			priv->stations[i].sta.mode = 0;
			priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
			found = true;
		}
	}

	/* Pass 2: re-send ADD_STA (and the saved LQ command, if any) for
	 * each marked station.  Commands are copied to local buffers and
	 * the lock is dropped around the synchronous sends, which sleep. */
	for (i = 0; i < priv->hw_params.max_stations; i++) {
		if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
			memcpy(&sta_cmd, &priv->stations[i].sta,
			       sizeof(struct iwl_legacy_addsta_cmd));
			send_lq = false;
			if (priv->stations[i].lq) {
				memcpy(&lq, priv->stations[i].lq,
				       sizeof(struct iwl_link_quality_cmd));
				send_lq = true;
			}
			spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
			ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
			if (ret) {
				/* give up on this station entirely */
				spin_lock_irqsave(&priv->sta_lock, flags_spin);
				IWL_ERR(priv, "Adding station %pM failed.\n",
					priv->stations[i].sta.sta.addr);
				priv->stations[i].used &=
						~IWL_STA_DRIVER_ACTIVE;
				priv->stations[i].used &=
						~IWL_STA_UCODE_INPROGRESS;
				spin_unlock_irqrestore(&priv->sta_lock,
							flags_spin);
			}
			/*
			 * Rate scaling has already been initialized, send
			 * current LQ command
			 */
			if (send_lq)
				iwl_legacy_send_lq_cmd(priv, ctx, &lq,
							CMD_SYNC, true);
			spin_lock_irqsave(&priv->sta_lock, flags_spin);
			priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
		}
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
	if (!found)
		IWL_DEBUG_INFO(priv, "Restoring all known stations"
				" .... no stations to be restored.\n");
	else
		IWL_DEBUG_INFO(priv, "Restoring all known stations"
				" .... complete.\n");
}
EXPORT_SYMBOL(iwl_legacy_restore_stations);
651
652int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
653{
654 int i;
655
656 for (i = 0; i < priv->sta_key_max_num; i++)
657 if (!test_and_set_bit(i, &priv->ucode_key_table))
658 return i;
659
660 return WEP_INVALID_OFFSET;
661}
662EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
663
664void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
665{
666 unsigned long flags;
667 int i;
668
669 spin_lock_irqsave(&priv->sta_lock, flags);
670 for (i = 0; i < priv->hw_params.max_stations; i++) {
671 if (!(priv->stations[i].used & IWL_STA_BCAST))
672 continue;
673
674 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
675 priv->num_stations--;
676 BUG_ON(priv->num_stations < 0);
677 kfree(priv->stations[i].lq);
678 priv->stations[i].lq = NULL;
679 }
680 spin_unlock_irqrestore(&priv->sta_lock, flags);
681}
682EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
683
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Dump an LQ command's station id, antenna masks and full rate table
 * to the rate-scaling debug log. */
static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
			   struct iwl_link_quality_cmd *lq)
{
	int i;
	IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
	IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
		       lq->general_params.single_stream_ant_msk,
		       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
			       i, lq->rs_table[i].rate_n_flags);
}
#else
/* Debugging disabled: compiles to a no-op. */
static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
				  struct iwl_link_quality_cmd *lq)
{
}
#endif
704
705/**
706 * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
707 *
708 * It sometimes happens when a HT rate has been in use and we
709 * loose connectivity with AP then mac80211 will first tell us that the
710 * current channel is not HT anymore before removing the station. In such a
711 * scenario the RXON flags will be updated to indicate we are not
712 * communicating HT anymore, but the LQ command may still contain HT rates.
713 * Test for this to prevent driver from sending LQ command between the time
714 * RXON flags are updated and when LQ command is updated.
715 */
716static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
717 struct iwl_rxon_context *ctx,
718 struct iwl_link_quality_cmd *lq)
719{
720 int i;
721
722 if (ctx->ht.enabled)
723 return true;
724
725 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
726 ctx->active.channel);
727 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
728 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
729 RATE_MCS_HT_MSK) {
730 IWL_DEBUG_INFO(priv,
731 "index %d of LQ expects HT channel\n",
732 i);
733 return false;
734 }
735 }
736 return true;
737}
738
/**
 * iwl_legacy_send_lq_cmd() - Send link quality command
 * @init: This command is sent as part of station initialization right
 *        after station has been added.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set and we call a callback in
 * this case to clear the state indicating that station creation is in
 * progress.
 *
 * Returns 0 on success; -EINVAL for an invalid/inactive station or an LQ
 * table inconsistent with the current (non-HT) channel; otherwise the
 * error from iwl_legacy_send_cmd().
 */
int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
	int ret = 0;
	unsigned long flags_spin;

	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_LINK_QUALITY_CMD,
		.len = sizeof(struct iwl_link_quality_cmd),
		.flags = flags,
		.data = lq,
	};

	if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
		return -EINVAL;


	/* only send LQ for stations the driver still considers active */
	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	iwl_legacy_dump_lq_cmd(priv, lq);
	/* init implies waiting for completion below, so it must be sync */
	BUG_ON(init && (cmd.flags & CMD_ASYNC));

	if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
		ret = iwl_legacy_send_cmd(priv, &cmd);
	else
		ret = -EINVAL;

	/* async: the completion path owns any further bookkeeping */
	if (cmd.flags & CMD_ASYNC)
		return ret;

	if (init) {
		IWL_DEBUG_INFO(priv, "init LQ command complete,"
				" clearing sta addition status for sta %d\n",
				lq->sta_id);
		spin_lock_irqsave(&priv->sta_lock, flags_spin);
		priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
	}
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
795
796int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
797 struct ieee80211_vif *vif,
798 struct ieee80211_sta *sta)
799{
800 struct iwl_priv *priv = hw->priv;
801 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
802 int ret;
803
804 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
805 sta->addr);
806 mutex_lock(&priv->mutex);
807 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
808 sta->addr);
809 ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
810 if (ret)
811 IWL_ERR(priv, "Error removing station %pM\n",
812 sta->addr);
813 mutex_unlock(&priv->mutex);
814 return ret;
815}
816EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644
index 000000000000..67bd75fe01a1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.h
@@ -0,0 +1,148 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_legacy_sta_h__
30#define __iwl_legacy_sta_h__
31
32#include "iwl-dev.h"
33
34#define HW_KEY_DYNAMIC 0
35#define HW_KEY_DEFAULT 1
36
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
40 being activated */
41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
45
46void iwl_legacy_restore_stations(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx);
48void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
49 struct iwl_rxon_context *ctx);
50void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
51int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
52int iwl_legacy_send_add_sta(struct iwl_priv *priv,
53 struct iwl_legacy_addsta_cmd *sta, u8 flags);
54int iwl_legacy_add_station_common(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 const u8 *addr, bool is_ap,
57 struct ieee80211_sta *sta, u8 *sta_id_r);
58int iwl_legacy_remove_station(struct iwl_priv *priv,
59 const u8 sta_id,
60 const u8 *addr);
61int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta);
64
65u8 iwl_legacy_prep_station(struct iwl_priv *priv,
66 struct iwl_rxon_context *ctx,
67 const u8 *addr, bool is_ap,
68 struct ieee80211_sta *sta);
69
70int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
71 struct iwl_rxon_context *ctx,
72 struct iwl_link_quality_cmd *lq,
73 u8 flags, bool init);
74
75/**
76 * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
77 * @priv: iwl priv struct
78 *
79 * This is called during iwl_down() to make sure that in the case
80 * we're coming there from a hardware restart mac80211 will be
81 * able to reconfigure stations -- if we're getting there in the
82 * normal down flow then the stations will already be cleared.
83 */
84static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
85{
86 unsigned long flags;
87 struct iwl_rxon_context *ctx;
88
89 spin_lock_irqsave(&priv->sta_lock, flags);
90 memset(priv->stations, 0, sizeof(priv->stations));
91 priv->num_stations = 0;
92
93 priv->ucode_key_table = 0;
94
95 for_each_context(priv, ctx) {
96 /*
97 * Remove all key information that is not stored as part
98 * of station information since mac80211 may not have had
99 * a chance to remove all the keys. When device is
100 * reconfigured by mac80211 after an error all keys will
101 * be reconfigured.
102 */
103 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
104 ctx->key_mapping_keys = 0;
105 }
106
107 spin_unlock_irqrestore(&priv->sta_lock, flags);
108}
109
110static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
111{
112 if (WARN_ON(!sta))
113 return IWL_INVALID_STATION;
114
115 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
116}
117
118/**
119 * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
120 * @priv: iwl priv
121 * @context: the current context
122 * @sta: mac80211 station
123 *
124 * In certain circumstances mac80211 passes a station pointer
125 * that may be %NULL, for example during TX or key setup. In
126 * that case, we need to use the broadcast station, so this
127 * inline wraps that pattern.
128 */
129static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
130 struct iwl_rxon_context *context,
131 struct ieee80211_sta *sta)
132{
133 int sta_id;
134
135 if (!sta)
136 return context->bcast_sta_id;
137
138 sta_id = iwl_legacy_sta_id(sta);
139
140 /*
141 * mac80211 should not be passing a partially
142 * initialised station!
143 */
144 WARN_ON(sta_id == IWL_INVALID_STATION);
145
146 return sta_id;
147}
148#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644
index 000000000000..a227773cb384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -0,0 +1,660 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40
/**
 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
 *
 * Hands the queue's current write pointer to the device, waking the NIC
 * first when it is in power-save.  No-op unless txq->need_update is set.
 */
void
iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
			iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* need_update stays set; retried after wakeup IRQ */
			return;
		}

		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
				txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	txq->need_update = 0;
}
EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
83
84/**
85 * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
86 */
87void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
88{
89 struct iwl_tx_queue *txq = &priv->txq[txq_id];
90 struct iwl_queue *q = &txq->q;
91
92 if (q->n_bd == 0)
93 return;
94
95 while (q->write_ptr != q->read_ptr) {
96 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
97 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
98 }
99}
100EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
101
102/**
103 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
104 * @txq: Transmit queue to deallocate.
105 *
106 * Empty queue by removing and destroying all BD's.
107 * Free all buffers.
108 * 0-fill, but do not free "txq" descriptor structure.
109 */
110void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
111{
112 struct iwl_tx_queue *txq = &priv->txq[txq_id];
113 struct device *dev = &priv->pci_dev->dev;
114 int i;
115
116 iwl_legacy_tx_queue_unmap(priv, txq_id);
117
118 /* De-alloc array of command/tx buffers */
119 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
120 kfree(txq->cmd[i]);
121
122 /* De-alloc circular buffer of TFDs */
123 if (txq->q.n_bd)
124 dma_free_coherent(dev, priv->hw_params.tfd_size *
125 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
126
127 /* De-alloc array of per-TFD driver data */
128 kfree(txq->txb);
129 txq->txb = NULL;
130
131 /* deallocate arrays */
132 kfree(txq->cmd);
133 kfree(txq->meta);
134 txq->cmd = NULL;
135 txq->meta = NULL;
136
137 /* 0-fill queue descriptor structure */
138 memset(txq, 0, sizeof(*txq));
139}
140EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
141
/**
 * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 *
 * Walks read_ptr..write_ptr and unmaps every in-flight command buffer.
 * A "huge" command is skipped in the walk and unmapped once at the end
 * from the extra slot at index q->n_window, since the per-entry walk
 * cannot tell which ring entry used it.
 */
void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	bool huge = false;
	int i;

	/* queue was never allocated */
	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		/* we have no way to tell if it is a huge cmd ATM */
		i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_SIZE_HUGE)
			huge = true;
		else
			pci_unmap_single(priv->pci_dev,
					dma_unmap_addr(&txq->meta[i], mapping),
					dma_unmap_len(&txq->meta[i], len),
					PCI_DMA_BIDIRECTIONAL);

		q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
	}

	if (huge) {
		/* the huge command occupies the dedicated last slot */
		i = q->n_window;
		pci_unmap_single(priv->pci_dev,
				dma_unmap_addr(&txq->meta[i], mapping),
				dma_unmap_len(&txq->meta[i], len),
				PCI_DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
179
180/**
181 * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
182 * @txq: Transmit queue to deallocate.
183 *
184 * Empty queue by removing and destroying all BD's.
185 * Free all buffers.
186 * 0-fill, but do not free "txq" descriptor structure.
187 */
188void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
189{
190 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
191 struct device *dev = &priv->pci_dev->dev;
192 int i;
193
194 iwl_legacy_cmd_queue_unmap(priv);
195
196 /* De-alloc array of command/tx buffers */
197 for (i = 0; i <= TFD_CMD_SLOTS; i++)
198 kfree(txq->cmd[i]);
199
200 /* De-alloc circular buffer of TFDs */
201 if (txq->q.n_bd)
202 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
203 txq->tfds, txq->q.dma_addr);
204
205 /* deallocate arrays */
206 kfree(txq->cmd);
207 kfree(txq->meta);
208 txq->cmd = NULL;
209 txq->meta = NULL;
210
211 /* 0-fill queue descriptor structure */
212 memset(txq, 0, sizeof(*txq));
213}
214EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
215
216/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
217 * DMA services
218 *
219 * Theory of operation
220 *
221 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
222 * of buffer descriptors, each of which points to one or more data buffers for
223 * the device to read from or fill. Driver and device exchange status of each
224 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
225 * entries in each circular buffer, to protect against confusing empty and full
226 * queue states.
227 *
228 * The device reads or writes the data in the queues via the device's several
229 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
230 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is
 * stopped. When reclaiming packets (on 'tx done' IRQ), if free space
 * becomes > high mark, the Tx queue is resumed.
235 *
236 * See more detailed info in iwl-4965-hw.h.
237 ***************************************************/
238
239int iwl_legacy_queue_space(const struct iwl_queue *q)
240{
241 int s = q->read_ptr - q->write_ptr;
242
243 if (q->read_ptr > q->write_ptr)
244 s -= q->n_bd;
245
246 if (s <= 0)
247 s += q->n_window;
248 /* keep some reserve to not confuse empty and full situations */
249 s -= 2;
250 if (s < 0)
251 s = 0;
252 return s;
253}
254EXPORT_SYMBOL(iwl_legacy_queue_space);
255
256
257/**
258 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
259 */
260static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
261 int count, int slots_num, u32 id)
262{
263 q->n_bd = count;
264 q->n_window = slots_num;
265 q->id = id;
266
267 /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
268 * and iwl_legacy_queue_dec_wrap are broken. */
269 BUG_ON(!is_power_of_2(count));
270
271 /* slots_num must be power-of-two size, otherwise
272 * iwl_legacy_get_cmd_index is broken. */
273 BUG_ON(!is_power_of_2(slots_num));
274
275 q->low_mark = q->n_window / 4;
276 if (q->low_mark < 4)
277 q->low_mark = 4;
278
279 q->high_mark = q->n_window / 8;
280 if (q->high_mark < 2)
281 q->high_mark = 2;
282
283 q->write_ptr = q->read_ptr = 0;
284
285 return 0;
286}
287
288/**
289 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
290 */
291static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
292 struct iwl_tx_queue *txq, u32 id)
293{
294 struct device *dev = &priv->pci_dev->dev;
295 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
296
297 /* Driver private data, only for Tx (not command) queues,
298 * not shared with device. */
299 if (id != priv->cmd_queue) {
300 txq->txb = kzalloc(sizeof(txq->txb[0]) *
301 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
302 if (!txq->txb) {
303 IWL_ERR(priv, "kmalloc for auxiliary BD "
304 "structures failed\n");
305 goto error;
306 }
307 } else {
308 txq->txb = NULL;
309 }
310
311 /* Circular buffer of transmit frame descriptors (TFDs),
312 * shared with device */
313 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
314 GFP_KERNEL);
315 if (!txq->tfds) {
316 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
317 goto error;
318 }
319 txq->q.id = id;
320
321 return 0;
322
323 error:
324 kfree(txq->txb);
325 txq->txb = NULL;
326
327 return -ENOMEM;
328}
329
330/**
331 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
332 */
333int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
334 int slots_num, u32 txq_id)
335{
336 int i, len;
337 int ret;
338 int actual_slots = slots_num;
339
340 /*
341 * Alloc buffer array for commands (Tx or other types of commands).
342 * For the command queue (#4/#9), allocate command space + one big
343 * command for scan, since scan command is very huge; the system will
344 * not have two scans at the same time, so only one is needed.
345 * For normal Tx queues (all other queues), no super-size command
346 * space is needed.
347 */
348 if (txq_id == priv->cmd_queue)
349 actual_slots++;
350
351 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
352 GFP_KERNEL);
353 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
354 GFP_KERNEL);
355
356 if (!txq->meta || !txq->cmd)
357 goto out_free_arrays;
358
359 len = sizeof(struct iwl_device_cmd);
360 for (i = 0; i < actual_slots; i++) {
361 /* only happens for cmd queue */
362 if (i == slots_num)
363 len = IWL_MAX_CMD_SIZE;
364
365 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
366 if (!txq->cmd[i])
367 goto err;
368 }
369
370 /* Alloc driver data array and TFD circular buffer */
371 ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
372 if (ret)
373 goto err;
374
375 txq->need_update = 0;
376
377 /*
378 * For the default queues 0-3, set up the swq_id
379 * already -- all others need to get one later
380 * (if they need one at all).
381 */
382 if (txq_id < 4)
383 iwl_legacy_set_swq_id(txq, txq_id, txq_id);
384
385 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
386 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
387 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
388
389 /* Initialize queue's high/low-water marks, and head/tail indexes */
390 iwl_legacy_queue_init(priv, &txq->q,
391 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
392
393 /* Tell device where to find queue */
394 priv->cfg->ops->lib->txq_init(priv, txq);
395
396 return 0;
397err:
398 for (i = 0; i < actual_slots; i++)
399 kfree(txq->cmd[i]);
400out_free_arrays:
401 kfree(txq->meta);
402 kfree(txq->cmd);
403
404 return -ENOMEM;
405}
406EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
407
408void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
409 int slots_num, u32 txq_id)
410{
411 int actual_slots = slots_num;
412
413 if (txq_id == priv->cmd_queue)
414 actual_slots++;
415
416 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
417
418 txq->need_update = 0;
419
420 /* Initialize queue's high/low-water marks, and head/tail indexes */
421 iwl_legacy_queue_init(priv, &txq->q,
422 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
423
424 /* Tell device where to find queue */
425 priv->cfg->ops->lib->txq_init(priv, txq);
426}
427EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
428
429/*************** HOST COMMAND QUEUE FUNCTIONS *****/
430
/**
 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a point to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation is
 * failed. On success, it returns the index (> 0) of command in the
 * command queue.
 */
int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	/* let the per-device hook adjust the payload length, then add the
	 * command header to get the total size mapped for DMA */
	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries
	 * Also, check to see if command buffer should not exceed the size
	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	/* async callers need one extra free slot for the response */
	if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space in command queue\n");
		IWL_ERR(priv, "Restarting adapter due to queue full\n");
		queue_work(priv->workqueue, &priv->restart);
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	/* If this is a huge cmd, mark the huge flag also on the meta.flags
	 * of the _original_ cmd. This is used for DMA mapping clean up.
	 */
	if (cmd->flags & CMD_SIZE_HUGE) {
		idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
		txq->meta[idx].flags = CMD_SIZE_HUGE;
	}

	/* huge commands are redirected to the dedicated oversize slot */
	idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	/* noisy commands go to the dump-level log, everything else to HC */
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv,
				"Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* map header + payload for the device and save the mapping so the
	 * completion path can unmap it */
	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
						fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
559
560/**
561 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
562 *
563 * When FW advances 'R' index, all entries between old and new 'R' index
564 * need to be reclaimed. As result, some free space forms. If there is
565 * enough free space (> low mark), wake the stack that feeds us.
566 */
567static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
568 int idx, int cmd_idx)
569{
570 struct iwl_tx_queue *txq = &priv->txq[txq_id];
571 struct iwl_queue *q = &txq->q;
572 int nfreed = 0;
573
574 if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
575 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
576 "is out of range [0-%d] %d %d.\n", txq_id,
577 idx, q->n_bd, q->write_ptr, q->read_ptr);
578 return;
579 }
580
581 for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
582 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
583
584 if (nfreed++ > 0) {
585 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
586 q->write_ptr, q->read_ptr);
587 queue_work(priv->workqueue, &priv->restart);
588 }
589
590 }
591}
592
/**
 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void
iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	/* NOTE(review): masks the raw little-endian sequence field with a
	 * host-order constant — presumably correct because SEQ_HUGE_FRAME
	 * lands in the same byte either way; TODO confirm on big-endian. */
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, priv->cmd_queue, sequence,
		  priv->txq[priv->cmd_queue].q.read_ptr,
		  priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	/* If this is a huge cmd, clear the huge flag on the meta.flags
	 * of the _original_ cmd. So that iwl_legacy_cmd_queue_free won't unmap
	 * the DMA buffer for the scan (huge) command.
	 */
	if (huge) {
		/* The non-huge slot at the same index holds the original
		 * command's metadata; clear its flags first. */
		cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
		txq->meta[cmd_index].flags = 0;
	}
	/* Recompute the slot index honoring the huge flag for this packet. */
	cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	/* Release the DMA mapping that was set up when the command was
	 * queued (see dma_unmap_addr_set/dma_unmap_len_set at enqueue). */
	pci_unmap_single(priv->pci_dev,
			 dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* Hand the reply page to the command's originator; NULL-ing
		 * rxb->page transfers ownership so it isn't freed here. */
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	/* Synchronous commands hold STATUS_HCMD_ACTIVE; clear it and wake
	 * whoever is sleeping on wait_command_queue. */
	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_legacy_get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}
	/* Mark the slot's metadata free for reuse. */
	meta->flags = 0;
}
EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 371abbf60eac..ab87e1b73529 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -61,7 +61,6 @@
61#include "iwl-helpers.h" 61#include "iwl-helpers.h"
62#include "iwl-dev.h" 62#include "iwl-dev.h"
63#include "iwl-spectrum.h" 63#include "iwl-spectrum.h"
64#include "iwl-legacy.h"
65 64
66/* 65/*
67 * module name, copyright, version, etc. 66 * module name, copyright, version, etc.
@@ -70,7 +69,7 @@
70#define DRV_DESCRIPTION \ 69#define DRV_DESCRIPTION \
71"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" 70"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
72 71
73#ifdef CONFIG_IWLWIFI_DEBUG 72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
74#define VD "d" 73#define VD "d"
75#else 74#else
76#define VD 75#define VD
@@ -82,7 +81,7 @@
82 * this was configurable. 81 * this was configurable.
83 */ 82 */
84#define DRV_VERSION IWLWIFI_VERSION VD "s" 83#define DRV_VERSION IWLWIFI_VERSION VD "s"
85#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" 84#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
86#define DRV_AUTHOR "<ilw@linux.intel.com>" 85#define DRV_AUTHOR "<ilw@linux.intel.com>"
87 86
88MODULE_DESCRIPTION(DRV_DESCRIPTION); 87MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -164,7 +163,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
164 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) 163 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
165 == STA_KEY_FLG_NO_ENC) 164 == STA_KEY_FLG_NO_ENC)
166 priv->stations[sta_id].sta.key.key_offset = 165 priv->stations[sta_id].sta.key.key_offset =
167 iwl_get_free_ucode_key_index(priv); 166 iwl_legacy_get_free_ucode_key_index(priv);
168 /* else, we are overriding an existing key => no need to allocated room 167 /* else, we are overriding an existing key => no need to allocated room
169 * in uCode. */ 168 * in uCode. */
170 169
@@ -177,7 +176,8 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
177 176
178 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); 177 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
179 178
180 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 179 ret = iwl_legacy_send_add_sta(priv,
180 &priv->stations[sta_id].sta, CMD_ASYNC);
181 181
182 spin_unlock_irqrestore(&priv->sta_lock, flags); 182 spin_unlock_irqrestore(&priv->sta_lock, flags);
183 183
@@ -201,7 +201,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
201static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) 201static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
202{ 202{
203 unsigned long flags; 203 unsigned long flags;
204 struct iwl_addsta_cmd sta_cmd; 204 struct iwl_legacy_addsta_cmd sta_cmd;
205 205
206 spin_lock_irqsave(&priv->sta_lock, flags); 206 spin_lock_irqsave(&priv->sta_lock, flags);
207 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); 207 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -210,11 +210,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
210 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 210 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
211 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 211 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
212 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 212 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
213 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 213 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
214 spin_unlock_irqrestore(&priv->sta_lock, flags); 214 spin_unlock_irqrestore(&priv->sta_lock, flags);
215 215
216 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); 216 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
217 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 217 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
218} 218}
219 219
220static int iwl3945_set_dynamic_key(struct iwl_priv *priv, 220static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -318,7 +318,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
318 int left) 318 int left)
319{ 319{
320 320
321 if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) 321 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
322 return 0; 322 return 0;
323 323
324 if (priv->beacon_skb->len > left) 324 if (priv->beacon_skb->len > left)
@@ -344,12 +344,12 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
344 return -ENOMEM; 344 return -ENOMEM;
345 } 345 }
346 346
347 rate = iwl_rate_get_lowest_plcp(priv, 347 rate = iwl_legacy_get_lowest_plcp(priv,
348 &priv->contexts[IWL_RXON_CTX_BSS]); 348 &priv->contexts[IWL_RXON_CTX_BSS]);
349 349
350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); 350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
351 351
352 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 352 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
353 &frame->u.cmd[0]); 353 &frame->u.cmd[0]);
354 354
355 iwl3945_free_frame(priv, frame); 355 iwl3945_free_frame(priv, frame);
@@ -443,7 +443,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
444 } 444 }
445 445
446 priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); 446 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
447 447
448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
449 if (ieee80211_is_mgmt(fc)) { 449 if (ieee80211_is_mgmt(fc)) {
@@ -485,7 +485,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
485 unsigned long flags; 485 unsigned long flags;
486 486
487 spin_lock_irqsave(&priv->lock, flags); 487 spin_lock_irqsave(&priv->lock, flags);
488 if (iwl_is_rfkill(priv)) { 488 if (iwl_legacy_is_rfkill(priv)) {
489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); 489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
490 goto drop_unlock; 490 goto drop_unlock;
491 } 491 }
@@ -500,7 +500,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
500 500
501 fc = hdr->frame_control; 501 fc = hdr->frame_control;
502 502
503#ifdef CONFIG_IWLWIFI_DEBUG 503#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
504 if (ieee80211_is_auth(fc)) 504 if (ieee80211_is_auth(fc))
505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); 505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
506 else if (ieee80211_is_assoc_req(fc)) 506 else if (ieee80211_is_assoc_req(fc))
@@ -514,7 +514,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
514 hdr_len = ieee80211_hdrlen(fc); 514 hdr_len = ieee80211_hdrlen(fc);
515 515
516 /* Find index into station table for destination station */ 516 /* Find index into station table for destination station */
517 sta_id = iwl_sta_id_or_broadcast( 517 sta_id = iwl_legacy_sta_id_or_broadcast(
518 priv, &priv->contexts[IWL_RXON_CTX_BSS], 518 priv, &priv->contexts[IWL_RXON_CTX_BSS],
519 info->control.sta); 519 info->control.sta);
520 if (sta_id == IWL_INVALID_STATION) { 520 if (sta_id == IWL_INVALID_STATION) {
@@ -536,12 +536,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
536 txq = &priv->txq[txq_id]; 536 txq = &priv->txq[txq_id];
537 q = &txq->q; 537 q = &txq->q;
538 538
539 if ((iwl_queue_space(q) < q->high_mark)) 539 if ((iwl_legacy_queue_space(q) < q->high_mark))
540 goto drop; 540 goto drop;
541 541
542 spin_lock_irqsave(&priv->lock, flags); 542 spin_lock_irqsave(&priv->lock, flags);
543 543
544 idx = get_cmd_index(q, q->write_ptr, 0); 544 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
545 545
546 /* Set up driver data for this TFD */ 546 /* Set up driver data for this TFD */
547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -582,8 +582,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
582 len = (u16)skb->len; 582 len = (u16)skb->len;
583 tx_cmd->len = cpu_to_le16(len); 583 tx_cmd->len = cpu_to_le16(len);
584 584
585 iwl_dbg_log_tx_data_frame(priv, len, hdr); 585 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
586 iwl_update_stats(priv, true, fc, len); 586 iwl_legacy_update_stats(priv, true, fc, len);
587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
589 589
@@ -642,20 +642,20 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
642 642
643 643
644 /* Tell device the write index *just past* this latest filled TFD */ 644 /* Tell device the write index *just past* this latest filled TFD */
645 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 645 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
646 iwl_txq_update_write_ptr(priv, txq); 646 iwl_legacy_txq_update_write_ptr(priv, txq);
647 spin_unlock_irqrestore(&priv->lock, flags); 647 spin_unlock_irqrestore(&priv->lock, flags);
648 648
649 if ((iwl_queue_space(q) < q->high_mark) 649 if ((iwl_legacy_queue_space(q) < q->high_mark)
650 && priv->mac80211_registered) { 650 && priv->mac80211_registered) {
651 if (wait_write_ptr) { 651 if (wait_write_ptr) {
652 spin_lock_irqsave(&priv->lock, flags); 652 spin_lock_irqsave(&priv->lock, flags);
653 txq->need_update = 1; 653 txq->need_update = 1;
654 iwl_txq_update_write_ptr(priv, txq); 654 iwl_legacy_txq_update_write_ptr(priv, txq);
655 spin_unlock_irqrestore(&priv->lock, flags); 655 spin_unlock_irqrestore(&priv->lock, flags);
656 } 656 }
657 657
658 iwl_stop_queue(priv, txq); 658 iwl_legacy_stop_queue(priv, txq);
659 } 659 }
660 660
661 return 0; 661 return 0;
@@ -683,8 +683,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
683 int duration = le16_to_cpu(params->duration); 683 int duration = le16_to_cpu(params->duration);
684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
685 685
686 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) 686 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
687 add_time = iwl_usecs_to_beacons(priv, 687 add_time = iwl_legacy_usecs_to_beacons(priv,
688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf, 688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
689 le16_to_cpu(ctx->timing.beacon_interval)); 689 le16_to_cpu(ctx->timing.beacon_interval));
690 690
@@ -697,9 +697,9 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
697 cmd.len = sizeof(spectrum); 697 cmd.len = sizeof(spectrum);
698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
699 699
700 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) 700 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
701 spectrum.start_time = 701 spectrum.start_time =
702 iwl_add_beacon_time(priv, 702 iwl_legacy_add_beacon_time(priv,
703 priv->_3945.last_beacon_time, add_time, 703 priv->_3945.last_beacon_time, add_time,
704 le16_to_cpu(ctx->timing.beacon_interval)); 704 le16_to_cpu(ctx->timing.beacon_interval));
705 else 705 else
@@ -712,7 +712,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
712 spectrum.flags |= RXON_FLG_BAND_24G_MSK | 712 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; 713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
714 714
715 rc = iwl_send_cmd_sync(priv, &cmd); 715 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
716 if (rc) 716 if (rc)
717 return rc; 717 return rc;
718 718
@@ -739,7 +739,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
739 break; 739 break;
740 } 740 }
741 741
742 iwl_free_pages(priv, cmd.reply_page); 742 iwl_legacy_free_pages(priv, cmd.reply_page);
743 743
744 return rc; 744 return rc;
745} 745}
@@ -783,45 +783,19 @@ static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
783static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, 783static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
784 struct iwl_rx_mem_buffer *rxb) 784 struct iwl_rx_mem_buffer *rxb)
785{ 785{
786#ifdef CONFIG_IWLWIFI_DEBUG 786#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
787 struct iwl_rx_packet *pkt = rxb_addr(rxb); 787 struct iwl_rx_packet *pkt = rxb_addr(rxb);
788#endif 788#endif
789 789
790 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 790 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
791} 791}
792 792
793static void iwl3945_bg_beacon_update(struct work_struct *work)
794{
795 struct iwl_priv *priv =
796 container_of(work, struct iwl_priv, beacon_update);
797 struct sk_buff *beacon;
798
799 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
800 beacon = ieee80211_beacon_get(priv->hw,
801 priv->contexts[IWL_RXON_CTX_BSS].vif);
802
803 if (!beacon) {
804 IWL_ERR(priv, "update beacon failed\n");
805 return;
806 }
807
808 mutex_lock(&priv->mutex);
809 /* new beacon skb is allocated every time; dispose previous.*/
810 if (priv->beacon_skb)
811 dev_kfree_skb(priv->beacon_skb);
812
813 priv->beacon_skb = beacon;
814 mutex_unlock(&priv->mutex);
815
816 iwl3945_send_beacon_cmd(priv);
817}
818
819static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, 793static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
820 struct iwl_rx_mem_buffer *rxb) 794 struct iwl_rx_mem_buffer *rxb)
821{ 795{
822 struct iwl_rx_packet *pkt = rxb_addr(rxb); 796 struct iwl_rx_packet *pkt = rxb_addr(rxb);
823 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 797 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
824#ifdef CONFIG_IWLWIFI_DEBUG 798#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
825 u8 rate = beacon->beacon_notify_hdr.rate; 799 u8 rate = beacon->beacon_notify_hdr.rate;
826 800
827 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 801 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -835,9 +809,6 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
835 809
836 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 810 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
837 811
838 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
839 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
840 queue_work(priv->workqueue, &priv->beacon_update);
841} 812}
842 813
843/* Handle notification from uCode that card's power state is changing 814/* Handle notification from uCode that card's power state is changing
@@ -862,7 +833,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
862 clear_bit(STATUS_RF_KILL_HW, &priv->status); 833 clear_bit(STATUS_RF_KILL_HW, &priv->status);
863 834
864 835
865 iwl_scan_cancel(priv); 836 iwl_legacy_scan_cancel(priv);
866 837
867 if ((test_bit(STATUS_RF_KILL_HW, &status) != 838 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
868 test_bit(STATUS_RF_KILL_HW, &priv->status))) 839 test_bit(STATUS_RF_KILL_HW, &priv->status)))
@@ -885,13 +856,13 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
885{ 856{
886 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; 857 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
887 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; 858 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
888 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; 859 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
889 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 860 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
890 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 861 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
891 iwl_rx_spectrum_measure_notif; 862 iwl_legacy_rx_spectrum_measure_notif;
892 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 863 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
893 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 864 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
894 iwl_rx_pm_debug_statistics_notif; 865 iwl_legacy_rx_pm_debug_statistics_notif;
895 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; 866 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
896 867
897 /* 868 /*
@@ -902,7 +873,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
902 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; 873 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
903 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; 874 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
904 875
905 iwl_setup_rx_scan_handlers(priv); 876 iwl_legacy_setup_rx_scan_handlers(priv);
906 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; 877 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
907 878
908 /* Set up hardware specific Rx handlers */ 879 /* Set up hardware specific Rx handlers */
@@ -1003,7 +974,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
1003 974
1004 spin_lock_irqsave(&rxq->lock, flags); 975 spin_lock_irqsave(&rxq->lock, flags);
1005 write = rxq->write & ~0x7; 976 write = rxq->write & ~0x7;
1006 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 977 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
1007 /* Get next free Rx buffer, remove from free list */ 978 /* Get next free Rx buffer, remove from free list */
1008 element = rxq->rx_free.next; 979 element = rxq->rx_free.next;
1009 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 980 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
@@ -1029,7 +1000,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
1029 spin_lock_irqsave(&rxq->lock, flags); 1000 spin_lock_irqsave(&rxq->lock, flags);
1030 rxq->need_update = 1; 1001 rxq->need_update = 1;
1031 spin_unlock_irqrestore(&rxq->lock, flags); 1002 spin_unlock_irqrestore(&rxq->lock, flags);
1032 iwl_rx_queue_update_write_ptr(priv, rxq); 1003 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1033 } 1004 }
1034} 1005}
1035 1006
@@ -1123,7 +1094,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1123 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1094 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1124 PAGE_SIZE << priv->hw_params.rx_page_order, 1095 PAGE_SIZE << priv->hw_params.rx_page_order,
1125 PCI_DMA_FROMDEVICE); 1096 PCI_DMA_FROMDEVICE);
1126 __iwl_free_pages(priv, rxq->pool[i].page); 1097 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1127 rxq->pool[i].page = NULL; 1098 rxq->pool[i].page = NULL;
1128 } 1099 }
1129 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1100 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1170,7 +1141,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1170 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1141 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1171 PAGE_SIZE << priv->hw_params.rx_page_order, 1142 PAGE_SIZE << priv->hw_params.rx_page_order,
1172 PCI_DMA_FROMDEVICE); 1143 PCI_DMA_FROMDEVICE);
1173 __iwl_free_pages(priv, rxq->pool[i].page); 1144 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1174 rxq->pool[i].page = NULL; 1145 rxq->pool[i].page = NULL;
1175 } 1146 }
1176 } 1147 }
@@ -1275,7 +1246,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1275 1246
1276 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 1247 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1277 len += sizeof(u32); /* account for status word */ 1248 len += sizeof(u32); /* account for status word */
1278 trace_iwlwifi_dev_rx(priv, pkt, len); 1249 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1279 1250
1280 /* Reclaim a command buffer only if this packet is a response 1251 /* Reclaim a command buffer only if this packet is a response
1281 * to a (driver-originated) command. 1252 * to a (driver-originated) command.
@@ -1292,14 +1263,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1292 * rx_handlers table. See iwl3945_setup_rx_handlers() */ 1263 * rx_handlers table. See iwl3945_setup_rx_handlers() */
1293 if (priv->rx_handlers[pkt->hdr.cmd]) { 1264 if (priv->rx_handlers[pkt->hdr.cmd]) {
1294 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, 1265 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1295 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1266 iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1296 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1267 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1297 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 1268 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1298 } else { 1269 } else {
1299 /* No handling needed */ 1270 /* No handling needed */
1300 IWL_DEBUG_RX(priv, 1271 IWL_DEBUG_RX(priv,
1301 "r %d i %d No handler needed for %s, 0x%02x\n", 1272 "r %d i %d No handler needed for %s, 0x%02x\n",
1302 r, i, get_cmd_string(pkt->hdr.cmd), 1273 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1303 pkt->hdr.cmd); 1274 pkt->hdr.cmd);
1304 } 1275 }
1305 1276
@@ -1312,10 +1283,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1312 1283
1313 if (reclaim) { 1284 if (reclaim) {
1314 /* Invoke any callbacks, transfer the buffer to caller, 1285 /* Invoke any callbacks, transfer the buffer to caller,
1315 * and fire off the (possibly) blocking iwl_send_cmd() 1286 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
1316 * as we reclaim the driver command queue */ 1287 * as we reclaim the driver command queue */
1317 if (rxb->page) 1288 if (rxb->page)
1318 iwl_tx_cmd_complete(priv, rxb); 1289 iwl_legacy_tx_cmd_complete(priv, rxb);
1319 else 1290 else
1320 IWL_WARN(priv, "Claim null rxb?\n"); 1291 IWL_WARN(priv, "Claim null rxb?\n");
1321 } 1292 }
@@ -1357,14 +1328,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1357} 1328}
1358 1329
1359/* call this function to flush any scheduled tasklet */ 1330/* call this function to flush any scheduled tasklet */
1360static inline void iwl_synchronize_irq(struct iwl_priv *priv) 1331static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
1361{ 1332{
1362 /* wait to make sure we flush pending tasklet*/ 1333 /* wait to make sure we flush pending tasklet*/
1363 synchronize_irq(priv->pci_dev->irq); 1334 synchronize_irq(priv->pci_dev->irq);
1364 tasklet_kill(&priv->irq_tasklet); 1335 tasklet_kill(&priv->irq_tasklet);
1365} 1336}
1366 1337
1367static const char *desc_lookup(int i) 1338static const char *iwl3945_desc_lookup(int i)
1368{ 1339{
1369 switch (i) { 1340 switch (i) {
1370 case 1: 1341 case 1:
@@ -1401,7 +1372,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1401 } 1372 }
1402 1373
1403 1374
1404 count = iwl_read_targ_mem(priv, base); 1375 count = iwl_legacy_read_targ_mem(priv, base);
1405 1376
1406 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 1377 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1407 IWL_ERR(priv, "Start IWL Error Log Dump:\n"); 1378 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
@@ -1414,25 +1385,25 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1414 for (i = ERROR_START_OFFSET; 1385 for (i = ERROR_START_OFFSET;
1415 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; 1386 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1416 i += ERROR_ELEM_SIZE) { 1387 i += ERROR_ELEM_SIZE) {
1417 desc = iwl_read_targ_mem(priv, base + i); 1388 desc = iwl_legacy_read_targ_mem(priv, base + i);
1418 time = 1389 time =
1419 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32)); 1390 iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
1420 blink1 = 1391 blink1 =
1421 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32)); 1392 iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
1422 blink2 = 1393 blink2 =
1423 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32)); 1394 iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
1424 ilink1 = 1395 ilink1 =
1425 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32)); 1396 iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
1426 ilink2 = 1397 ilink2 =
1427 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32)); 1398 iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
1428 data1 = 1399 data1 =
1429 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); 1400 iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1430 1401
1431 IWL_ERR(priv, 1402 IWL_ERR(priv,
1432 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1403 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1433 desc_lookup(desc), desc, time, blink1, blink2, 1404 iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
1434 ilink1, ilink2, data1); 1405 ilink1, ilink2, data1);
1435 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, 1406 trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
1436 0, blink1, blink2, ilink1, ilink2); 1407 0, blink1, blink2, ilink1, ilink2);
1437 } 1408 }
1438} 1409}
@@ -1471,14 +1442,14 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1471 iwl_grab_nic_access(priv); 1442 iwl_grab_nic_access(priv);
1472 1443
1473 /* Set starting address; reads will auto-increment */ 1444 /* Set starting address; reads will auto-increment */
1474 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); 1445 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1475 rmb(); 1446 rmb();
1476 1447
1477 /* "time" is actually "data" for mode 0 (no timestamp). 1448 /* "time" is actually "data" for mode 0 (no timestamp).
1478 * place event id # at far right for easier visual parsing. */ 1449 * place event id # at far right for easier visual parsing. */
1479 for (i = 0; i < num_events; i++) { 1450 for (i = 0; i < num_events; i++) {
1480 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1451 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1481 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1452 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1482 if (mode == 0) { 1453 if (mode == 0) {
1483 /* data, ev */ 1454 /* data, ev */
1484 if (bufsz) { 1455 if (bufsz) {
@@ -1487,11 +1458,12 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1487 time, ev); 1458 time, ev);
1488 } else { 1459 } else {
1489 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1460 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1490 trace_iwlwifi_dev_ucode_event(priv, 0, 1461 trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
1491 time, ev); 1462 time, ev);
1492 } 1463 }
1493 } else { 1464 } else {
1494 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1465 data = _iwl_legacy_read_direct32(priv,
1466 HBUS_TARG_MEM_RDAT);
1495 if (bufsz) { 1467 if (bufsz) {
1496 pos += scnprintf(*buf + pos, bufsz - pos, 1468 pos += scnprintf(*buf + pos, bufsz - pos,
1497 "%010u:0x%08x:%04u\n", 1469 "%010u:0x%08x:%04u\n",
@@ -1499,7 +1471,7 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1499 } else { 1471 } else {
1500 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", 1472 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1501 time, data, ev); 1473 time, data, ev);
1502 trace_iwlwifi_dev_ucode_event(priv, time, 1474 trace_iwlwifi_legacy_dev_ucode_event(priv, time,
1503 data, ev); 1475 data, ev);
1504 } 1476 }
1505 } 1477 }
@@ -1570,10 +1542,10 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1570 } 1542 }
1571 1543
1572 /* event log header */ 1544 /* event log header */
1573 capacity = iwl_read_targ_mem(priv, base); 1545 capacity = iwl_legacy_read_targ_mem(priv, base);
1574 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); 1546 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
1575 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1547 num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
1576 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1548 next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
1577 1549
1578 if (capacity > priv->cfg->base_params->max_event_log_size) { 1550 if (capacity > priv->cfg->base_params->max_event_log_size) {
1579 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 1551 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
@@ -1595,8 +1567,8 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1595 return pos; 1567 return pos;
1596 } 1568 }
1597 1569
1598#ifdef CONFIG_IWLWIFI_DEBUG 1570#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1599 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) 1571 if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1600 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) 1572 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1601 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; 1573 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1602#else 1574#else
@@ -1607,7 +1579,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1607 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n", 1579 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
1608 size); 1580 size);
1609 1581
1610#ifdef CONFIG_IWLWIFI_DEBUG 1582#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1611 if (display) { 1583 if (display) {
1612 if (full_log) 1584 if (full_log)
1613 bufsz = capacity * 48; 1585 bufsz = capacity * 48;
@@ -1617,7 +1589,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1617 if (!*buf) 1589 if (!*buf)
1618 return -ENOMEM; 1590 return -ENOMEM;
1619 } 1591 }
1620 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 1592 if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1621 /* if uCode has wrapped back to top of log, 1593 /* if uCode has wrapped back to top of log,
1622 * start at the oldest entry, 1594 * start at the oldest entry,
1623 * i.e the next one that uCode would fill. 1595 * i.e the next one that uCode would fill.
@@ -1647,7 +1619,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1647 u32 inta, handled = 0; 1619 u32 inta, handled = 0;
1648 u32 inta_fh; 1620 u32 inta_fh;
1649 unsigned long flags; 1621 unsigned long flags;
1650#ifdef CONFIG_IWLWIFI_DEBUG 1622#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1651 u32 inta_mask; 1623 u32 inta_mask;
1652#endif 1624#endif
1653 1625
@@ -1665,8 +1637,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1665 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1637 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1666 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); 1638 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
1667 1639
1668#ifdef CONFIG_IWLWIFI_DEBUG 1640#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1669 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 1641 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
1670 /* just for debug */ 1642 /* just for debug */
1671 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1643 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1672 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 1644 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -1690,18 +1662,18 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1690 IWL_ERR(priv, "Hardware error detected. Restarting.\n"); 1662 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
1691 1663
1692 /* Tell the device to stop sending interrupts */ 1664 /* Tell the device to stop sending interrupts */
1693 iwl_disable_interrupts(priv); 1665 iwl_legacy_disable_interrupts(priv);
1694 1666
1695 priv->isr_stats.hw++; 1667 priv->isr_stats.hw++;
1696 iwl_irq_handle_error(priv); 1668 iwl_legacy_irq_handle_error(priv);
1697 1669
1698 handled |= CSR_INT_BIT_HW_ERR; 1670 handled |= CSR_INT_BIT_HW_ERR;
1699 1671
1700 return; 1672 return;
1701 } 1673 }
1702 1674
1703#ifdef CONFIG_IWLWIFI_DEBUG 1675#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1704 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1676 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1705 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1677 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1706 if (inta & CSR_INT_BIT_SCD) { 1678 if (inta & CSR_INT_BIT_SCD) {
1707 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " 1679 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
@@ -1724,20 +1696,20 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1724 IWL_ERR(priv, "Microcode SW error detected. " 1696 IWL_ERR(priv, "Microcode SW error detected. "
1725 "Restarting 0x%X.\n", inta); 1697 "Restarting 0x%X.\n", inta);
1726 priv->isr_stats.sw++; 1698 priv->isr_stats.sw++;
1727 iwl_irq_handle_error(priv); 1699 iwl_legacy_irq_handle_error(priv);
1728 handled |= CSR_INT_BIT_SW_ERR; 1700 handled |= CSR_INT_BIT_SW_ERR;
1729 } 1701 }
1730 1702
1731 /* uCode wakes up after power-down sleep */ 1703 /* uCode wakes up after power-down sleep */
1732 if (inta & CSR_INT_BIT_WAKEUP) { 1704 if (inta & CSR_INT_BIT_WAKEUP) {
1733 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1705 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1734 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1706 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1735 iwl_txq_update_write_ptr(priv, &priv->txq[0]); 1707 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
1736 iwl_txq_update_write_ptr(priv, &priv->txq[1]); 1708 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
1737 iwl_txq_update_write_ptr(priv, &priv->txq[2]); 1709 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
1738 iwl_txq_update_write_ptr(priv, &priv->txq[3]); 1710 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
1739 iwl_txq_update_write_ptr(priv, &priv->txq[4]); 1711 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
1740 iwl_txq_update_write_ptr(priv, &priv->txq[5]); 1712 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
1741 1713
1742 priv->isr_stats.wakeup++; 1714 priv->isr_stats.wakeup++;
1743 handled |= CSR_INT_BIT_WAKEUP; 1715 handled |= CSR_INT_BIT_WAKEUP;
@@ -1757,7 +1729,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1757 priv->isr_stats.tx++; 1729 priv->isr_stats.tx++;
1758 1730
1759 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); 1731 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
1760 iwl_write_direct32(priv, FH39_TCSR_CREDIT 1732 iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
1761 (FH39_SRVC_CHNL), 0x0); 1733 (FH39_SRVC_CHNL), 0x0);
1762 handled |= CSR_INT_BIT_FH_TX; 1734 handled |= CSR_INT_BIT_FH_TX;
1763 } 1735 }
@@ -1776,10 +1748,10 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1776 /* Re-enable all interrupts */ 1748 /* Re-enable all interrupts */
1777 /* only Re-enable if disabled by irq */ 1749 /* only Re-enable if disabled by irq */
1778 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1750 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1779 iwl_enable_interrupts(priv); 1751 iwl_legacy_enable_interrupts(priv);
1780 1752
1781#ifdef CONFIG_IWLWIFI_DEBUG 1753#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1782 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1754 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1783 inta = iwl_read32(priv, CSR_INT); 1755 inta = iwl_read32(priv, CSR_INT);
1784 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1756 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1785 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1757 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -1806,14 +1778,14 @@ static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
1806 return added; 1778 return added;
1807 } 1779 }
1808 1780
1809 active_dwell = iwl_get_active_dwell_time(priv, band, 0); 1781 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
1810 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1782 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1811 1783
1812 if (passive_dwell <= active_dwell) 1784 if (passive_dwell <= active_dwell)
1813 passive_dwell = active_dwell + 1; 1785 passive_dwell = active_dwell + 1;
1814 1786
1815 1787
1816 channel = iwl_get_single_channel_number(priv, band); 1788 channel = iwl_legacy_get_single_channel_number(priv, band);
1817 1789
1818 if (channel) { 1790 if (channel) {
1819 scan_ch->channel = channel; 1791 scan_ch->channel = channel;
@@ -1849,8 +1821,8 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1849 if (!sband) 1821 if (!sband)
1850 return 0; 1822 return 0;
1851 1823
1852 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); 1824 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
1853 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1825 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1854 1826
1855 if (passive_dwell <= active_dwell) 1827 if (passive_dwell <= active_dwell)
1856 passive_dwell = active_dwell + 1; 1828 passive_dwell = active_dwell + 1;
@@ -1863,10 +1835,12 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1863 1835
1864 scan_ch->channel = chan->hw_value; 1836 scan_ch->channel = chan->hw_value;
1865 1837
1866 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); 1838 ch_info = iwl_legacy_get_channel_info(priv, band,
1867 if (!is_channel_valid(ch_info)) { 1839 scan_ch->channel);
1868 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", 1840 if (!iwl_legacy_is_channel_valid(ch_info)) {
1869 scan_ch->channel); 1841 IWL_DEBUG_SCAN(priv,
1842 "Channel %d is INVALID for this band.\n",
1843 scan_ch->channel);
1870 continue; 1844 continue;
1871 } 1845 }
1872 1846
@@ -1875,7 +1849,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1875 /* If passive , set up for auto-switch 1849 /* If passive , set up for auto-switch
1876 * and use long active_dwell time. 1850 * and use long active_dwell time.
1877 */ 1851 */
1878 if (!is_active || is_channel_passive(ch_info) || 1852 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
1879 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { 1853 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1880 scan_ch->type = 0; /* passive */ 1854 scan_ch->type = 0; /* passive */
1881 if (IWL_UCODE_API(priv->ucode_ver) == 1) 1855 if (IWL_UCODE_API(priv->ucode_ver) == 1)
@@ -1955,12 +1929,12 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1955 1929
1956static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) 1930static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
1957{ 1931{
1958 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 1932 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1959 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 1933 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1960 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 1934 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1961 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); 1935 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1962 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); 1936 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1963 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 1937 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1964} 1938}
1965 1939
1966/** 1940/**
@@ -1976,7 +1950,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
1976 1950
1977 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); 1951 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1978 1952
1979 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 1953 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1980 IWL39_RTC_INST_LOWER_BOUND); 1954 IWL39_RTC_INST_LOWER_BOUND);
1981 1955
1982 errcnt = 0; 1956 errcnt = 0;
@@ -1984,7 +1958,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
1984 /* read data comes through single port, auto-incr addr */ 1958 /* read data comes through single port, auto-incr addr */
1985 /* NOTE: Use the debugless read so we don't flood kernel log 1959 /* NOTE: Use the debugless read so we don't flood kernel log
1986 * if IWL_DL_IO is set */ 1960 * if IWL_DL_IO is set */
1987 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1961 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1988 if (val != le32_to_cpu(*image)) { 1962 if (val != le32_to_cpu(*image)) {
1989 IWL_ERR(priv, "uCode INST section is invalid at " 1963 IWL_ERR(priv, "uCode INST section is invalid at "
1990 "offset 0x%x, is 0x%x, s/b 0x%x\n", 1964 "offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -2023,9 +1997,9 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
2023 /* read data comes through single port, auto-incr addr */ 1997 /* read data comes through single port, auto-incr addr */
2024 /* NOTE: Use the debugless read so we don't flood kernel log 1998 /* NOTE: Use the debugless read so we don't flood kernel log
2025 * if IWL_DL_IO is set */ 1999 * if IWL_DL_IO is set */
2026 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 2000 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
2027 i + IWL39_RTC_INST_LOWER_BOUND); 2001 i + IWL39_RTC_INST_LOWER_BOUND);
2028 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2002 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
2029 if (val != le32_to_cpu(*image)) { 2003 if (val != le32_to_cpu(*image)) {
2030#if 0 /* Enable this if you want to see details */ 2004#if 0 /* Enable this if you want to see details */
2031 IWL_ERR(priv, "uCode INST section is invalid at " 2005 IWL_ERR(priv, "uCode INST section is invalid at "
@@ -2101,7 +2075,7 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
2101#define IWL3945_UCODE_GET(item) \ 2075#define IWL3945_UCODE_GET(item) \
2102static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ 2076static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
2103{ \ 2077{ \
2104 return le32_to_cpu(ucode->u.v1.item); \ 2078 return le32_to_cpu(ucode->v1.item); \
2105} 2079}
2106 2080
2107static u32 iwl3945_ucode_get_header_size(u32 api_ver) 2081static u32 iwl3945_ucode_get_header_size(u32 api_ver)
@@ -2111,7 +2085,7 @@ static u32 iwl3945_ucode_get_header_size(u32 api_ver)
2111 2085
2112static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) 2086static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
2113{ 2087{
2114 return (u8 *) ucode->u.v1.data; 2088 return (u8 *) ucode->v1.data;
2115} 2089}
2116 2090
2117IWL3945_UCODE_GET(inst_size); 2091IWL3945_UCODE_GET(inst_size);
@@ -2286,13 +2260,13 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2286 * 1) unmodified from disk 2260 * 1) unmodified from disk
2287 * 2) backup cache for save/restore during power-downs */ 2261 * 2) backup cache for save/restore during power-downs */
2288 priv->ucode_code.len = inst_size; 2262 priv->ucode_code.len = inst_size;
2289 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); 2263 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2290 2264
2291 priv->ucode_data.len = data_size; 2265 priv->ucode_data.len = data_size;
2292 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); 2266 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2293 2267
2294 priv->ucode_data_backup.len = data_size; 2268 priv->ucode_data_backup.len = data_size;
2295 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 2269 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2296 2270
2297 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || 2271 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2298 !priv->ucode_data_backup.v_addr) 2272 !priv->ucode_data_backup.v_addr)
@@ -2301,10 +2275,10 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2301 /* Initialization instructions and data */ 2275 /* Initialization instructions and data */
2302 if (init_size && init_data_size) { 2276 if (init_size && init_data_size) {
2303 priv->ucode_init.len = init_size; 2277 priv->ucode_init.len = init_size;
2304 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); 2278 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2305 2279
2306 priv->ucode_init_data.len = init_data_size; 2280 priv->ucode_init_data.len = init_data_size;
2307 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); 2281 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2308 2282
2309 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) 2283 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2310 goto err_pci_alloc; 2284 goto err_pci_alloc;
@@ -2313,7 +2287,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2313 /* Bootstrap (instructions only, no data) */ 2287 /* Bootstrap (instructions only, no data) */
2314 if (boot_size) { 2288 if (boot_size) {
2315 priv->ucode_boot.len = boot_size; 2289 priv->ucode_boot.len = boot_size;
2316 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); 2290 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2317 2291
2318 if (!priv->ucode_boot.v_addr) 2292 if (!priv->ucode_boot.v_addr)
2319 goto err_pci_alloc; 2293 goto err_pci_alloc;
@@ -2400,14 +2374,14 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2400 pdata = priv->ucode_data_backup.p_addr; 2374 pdata = priv->ucode_data_backup.p_addr;
2401 2375
2402 /* Tell bootstrap uCode where to find image to load */ 2376 /* Tell bootstrap uCode where to find image to load */
2403 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 2377 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2404 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 2378 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2405 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 2379 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2406 priv->ucode_data.len); 2380 priv->ucode_data.len);
2407 2381
2408 /* Inst byte count must be last to set up, bit 31 signals uCode 2382 /* Inst byte count must be last to set up, bit 31 signals uCode
2409 * that all new ptr/size info is in place */ 2383 * that all new ptr/size info is in place */
2410 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 2384 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2411 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 2385 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2412 2386
2413 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); 2387 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
@@ -2488,7 +2462,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2488 goto restart; 2462 goto restart;
2489 } 2463 }
2490 2464
2491 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); 2465 rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
2492 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); 2466 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2493 2467
2494 if (rfkill & 0x1) { 2468 if (rfkill & 0x1) {
@@ -2510,18 +2484,18 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2510 set_bit(STATUS_ALIVE, &priv->status); 2484 set_bit(STATUS_ALIVE, &priv->status);
2511 2485
2512 /* Enable watchdog to monitor the driver tx queues */ 2486 /* Enable watchdog to monitor the driver tx queues */
2513 iwl_setup_watchdog(priv); 2487 iwl_legacy_setup_watchdog(priv);
2514 2488
2515 if (iwl_is_rfkill(priv)) 2489 if (iwl_legacy_is_rfkill(priv))
2516 return; 2490 return;
2517 2491
2518 ieee80211_wake_queues(priv->hw); 2492 ieee80211_wake_queues(priv->hw);
2519 2493
2520 priv->active_rate = IWL_RATES_MASK; 2494 priv->active_rate = IWL_RATES_MASK_3945;
2521 2495
2522 iwl_power_update_mode(priv, true); 2496 iwl_legacy_power_update_mode(priv, true);
2523 2497
2524 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2498 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2525 struct iwl3945_rxon_cmd *active_rxon = 2499 struct iwl3945_rxon_cmd *active_rxon =
2526 (struct iwl3945_rxon_cmd *)(&ctx->active); 2500 (struct iwl3945_rxon_cmd *)(&ctx->active);
2527 2501
@@ -2529,21 +2503,20 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2529 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2503 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2530 } else { 2504 } else {
2531 /* Initialize our rx_config data */ 2505 /* Initialize our rx_config data */
2532 iwl_connection_init_rx_config(priv, ctx); 2506 iwl_legacy_connection_init_rx_config(priv, ctx);
2533 } 2507 }
2534 2508
2535 /* Configure Bluetooth device coexistence support */ 2509 /* Configure Bluetooth device coexistence support */
2536 priv->cfg->ops->hcmd->send_bt_config(priv); 2510 iwl_legacy_send_bt_config(priv);
2511
2512 set_bit(STATUS_READY, &priv->status);
2537 2513
2538 /* Configure the adapter for unassociated operation */ 2514 /* Configure the adapter for unassociated operation */
2539 iwl3945_commit_rxon(priv, ctx); 2515 iwl3945_commit_rxon(priv, ctx);
2540 2516
2541 iwl3945_reg_txpower_periodic(priv); 2517 iwl3945_reg_txpower_periodic(priv);
2542 2518
2543 iwl_leds_init(priv);
2544
2545 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2519 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2546 set_bit(STATUS_READY, &priv->status);
2547 wake_up_interruptible(&priv->wait_command_queue); 2520 wake_up_interruptible(&priv->wait_command_queue);
2548 2521
2549 return; 2522 return;
@@ -2561,7 +2534,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
2561 2534
2562 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2535 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2563 2536
2564 iwl_scan_cancel_timeout(priv, 200); 2537 iwl_legacy_scan_cancel_timeout(priv, 200);
2565 2538
2566 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 2539 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2567 2540
@@ -2570,9 +2543,9 @@ static void __iwl3945_down(struct iwl_priv *priv)
2570 del_timer_sync(&priv->watchdog); 2543 del_timer_sync(&priv->watchdog);
2571 2544
2572 /* Station information will now be cleared in device */ 2545 /* Station information will now be cleared in device */
2573 iwl_clear_ucode_stations(priv, NULL); 2546 iwl_legacy_clear_ucode_stations(priv, NULL);
2574 iwl_dealloc_bcast_stations(priv); 2547 iwl_legacy_dealloc_bcast_stations(priv);
2575 iwl_clear_driver_stations(priv); 2548 iwl_legacy_clear_driver_stations(priv);
2576 2549
2577 /* Unblock any waiting calls */ 2550 /* Unblock any waiting calls */
2578 wake_up_interruptible_all(&priv->wait_command_queue); 2551 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2587,16 +2560,16 @@ static void __iwl3945_down(struct iwl_priv *priv)
2587 2560
2588 /* tell the device to stop sending interrupts */ 2561 /* tell the device to stop sending interrupts */
2589 spin_lock_irqsave(&priv->lock, flags); 2562 spin_lock_irqsave(&priv->lock, flags);
2590 iwl_disable_interrupts(priv); 2563 iwl_legacy_disable_interrupts(priv);
2591 spin_unlock_irqrestore(&priv->lock, flags); 2564 spin_unlock_irqrestore(&priv->lock, flags);
2592 iwl_synchronize_irq(priv); 2565 iwl3945_synchronize_irq(priv);
2593 2566
2594 if (priv->mac80211_registered) 2567 if (priv->mac80211_registered)
2595 ieee80211_stop_queues(priv->hw); 2568 ieee80211_stop_queues(priv->hw);
2596 2569
2597 /* If we have not previously called iwl3945_init() then 2570 /* If we have not previously called iwl3945_init() then
2598 * clear all bits but the RF Kill bits and return */ 2571 * clear all bits but the RF Kill bits and return */
2599 if (!iwl_is_init(priv)) { 2572 if (!iwl_legacy_is_init(priv)) {
2600 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << 2573 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2601 STATUS_RF_KILL_HW | 2574 STATUS_RF_KILL_HW |
2602 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2575 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
@@ -2621,11 +2594,11 @@ static void __iwl3945_down(struct iwl_priv *priv)
2621 iwl3945_hw_rxq_stop(priv); 2594 iwl3945_hw_rxq_stop(priv);
2622 2595
2623 /* Power-down device's busmaster DMA clocks */ 2596 /* Power-down device's busmaster DMA clocks */
2624 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); 2597 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2625 udelay(5); 2598 udelay(5);
2626 2599
2627 /* Stop the device, and put it in low power state */ 2600 /* Stop the device, and put it in low power state */
2628 iwl_apm_stop(priv); 2601 iwl_legacy_apm_stop(priv);
2629 2602
2630 exit: 2603 exit:
2631 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2604 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2656,7 +2629,8 @@ static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2656 u8 sta_id; 2629 u8 sta_id;
2657 2630
2658 spin_lock_irqsave(&priv->sta_lock, flags); 2631 spin_lock_irqsave(&priv->sta_lock, flags);
2659 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL); 2632 sta_id = iwl_legacy_prep_station(priv, ctx,
2633 iwlegacy_bcast_addr, false, NULL);
2660 if (sta_id == IWL_INVALID_STATION) { 2634 if (sta_id == IWL_INVALID_STATION) {
2661 IWL_ERR(priv, "Unable to prepare broadcast station\n"); 2635 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2662 spin_unlock_irqrestore(&priv->sta_lock, flags); 2636 spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -2714,7 +2688,7 @@ static int __iwl3945_up(struct iwl_priv *priv)
2714 2688
2715 /* clear (again), then enable host interrupts */ 2689 /* clear (again), then enable host interrupts */
2716 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2690 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2717 iwl_enable_interrupts(priv); 2691 iwl_legacy_enable_interrupts(priv);
2718 2692
2719 /* really make sure rfkill handshake bits are cleared */ 2693 /* really make sure rfkill handshake bits are cleared */
2720 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2694 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -2856,21 +2830,18 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2856 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2830 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2857 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 2831 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2858 2832
2859 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2833 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2860 u16 interval = 0; 2834 u16 interval = 0;
2861 u32 extra; 2835 u32 extra;
2862 u32 suspend_time = 100; 2836 u32 suspend_time = 100;
2863 u32 scan_suspend_time = 100; 2837 u32 scan_suspend_time = 100;
2864 unsigned long flags;
2865 2838
2866 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 2839 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
2867 2840
2868 spin_lock_irqsave(&priv->lock, flags);
2869 if (priv->is_internal_short_scan) 2841 if (priv->is_internal_short_scan)
2870 interval = 0; 2842 interval = 0;
2871 else 2843 else
2872 interval = vif->bss_conf.beacon_int; 2844 interval = vif->bss_conf.beacon_int;
2873 spin_unlock_irqrestore(&priv->lock, flags);
2874 2845
2875 scan->suspend_time = 0; 2846 scan->suspend_time = 0;
2876 scan->max_out_time = cpu_to_le32(200 * 1024); 2847 scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -2947,7 +2918,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2947 2918
2948 if (!priv->is_internal_short_scan) { 2919 if (!priv->is_internal_short_scan) {
2949 scan->tx_cmd.len = cpu_to_le16( 2920 scan->tx_cmd.len = cpu_to_le16(
2950 iwl_fill_probe_req(priv, 2921 iwl_legacy_fill_probe_req(priv,
2951 (struct ieee80211_mgmt *)scan->data, 2922 (struct ieee80211_mgmt *)scan->data,
2952 vif->addr, 2923 vif->addr,
2953 priv->scan_request->ie, 2924 priv->scan_request->ie,
@@ -2956,9 +2927,9 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2956 } else { 2927 } else {
2957 /* use bcast addr, will not be transmitted but must be valid */ 2928 /* use bcast addr, will not be transmitted but must be valid */
2958 scan->tx_cmd.len = cpu_to_le16( 2929 scan->tx_cmd.len = cpu_to_le16(
2959 iwl_fill_probe_req(priv, 2930 iwl_legacy_fill_probe_req(priv,
2960 (struct ieee80211_mgmt *)scan->data, 2931 (struct ieee80211_mgmt *)scan->data,
2961 iwl_bcast_addr, NULL, 0, 2932 iwlegacy_bcast_addr, NULL, 0,
2962 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2933 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2963 } 2934 }
2964 /* select Rx antennas */ 2935 /* select Rx antennas */
@@ -2986,7 +2957,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2986 scan->len = cpu_to_le16(cmd.len); 2957 scan->len = cpu_to_le16(cmd.len);
2987 2958
2988 set_bit(STATUS_SCAN_HW, &priv->status); 2959 set_bit(STATUS_SCAN_HW, &priv->status);
2989 ret = iwl_send_cmd_sync(priv, &cmd); 2960 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
2990 if (ret) 2961 if (ret)
2991 clear_bit(STATUS_SCAN_HW, &priv->status); 2962 clear_bit(STATUS_SCAN_HW, &priv->status);
2992 return ret; 2963 return ret;
@@ -3054,25 +3025,20 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3054 if (!ctx->vif || !priv->is_open) 3025 if (!ctx->vif || !priv->is_open)
3055 return; 3026 return;
3056 3027
3057 if (ctx->vif->type == NL80211_IFTYPE_AP) {
3058 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3059 return;
3060 }
3061
3062 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3028 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3063 ctx->vif->bss_conf.aid, ctx->active.bssid_addr); 3029 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
3064 3030
3065 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3031 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3066 return; 3032 return;
3067 3033
3068 iwl_scan_cancel_timeout(priv, 200); 3034 iwl_legacy_scan_cancel_timeout(priv, 200);
3069 3035
3070 conf = ieee80211_get_hw_conf(priv->hw); 3036 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
3071 3037
3072 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3038 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3073 iwl3945_commit_rxon(priv, ctx); 3039 iwl3945_commit_rxon(priv, ctx);
3074 3040
3075 rc = iwl_send_rxon_timing(priv, ctx); 3041 rc = iwl_legacy_send_rxon_timing(priv, ctx);
3076 if (rc) 3042 if (rc)
3077 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3043 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3078 "Attempting to continue.\n"); 3044 "Attempting to continue.\n");
@@ -3170,8 +3136,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3170 * no need to poll the killswitch state anymore */ 3136 * no need to poll the killswitch state anymore */
3171 cancel_delayed_work(&priv->_3945.rfkill_poll); 3137 cancel_delayed_work(&priv->_3945.rfkill_poll);
3172 3138
3173 iwl_led_start(priv);
3174
3175 priv->is_open = 1; 3139 priv->is_open = 1;
3176 IWL_DEBUG_MAC80211(priv, "leave\n"); 3140 IWL_DEBUG_MAC80211(priv, "leave\n");
3177 return 0; 3141 return 0;
@@ -3206,7 +3170,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
3206 IWL_DEBUG_MAC80211(priv, "leave\n"); 3170 IWL_DEBUG_MAC80211(priv, "leave\n");
3207} 3171}
3208 3172
3209static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3173static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3210{ 3174{
3211 struct iwl_priv *priv = hw->priv; 3175 struct iwl_priv *priv = hw->priv;
3212 3176
@@ -3219,7 +3183,6 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3219 dev_kfree_skb_any(skb); 3183 dev_kfree_skb_any(skb);
3220 3184
3221 IWL_DEBUG_MAC80211(priv, "leave\n"); 3185 IWL_DEBUG_MAC80211(priv, "leave\n");
3222 return NETDEV_TX_OK;
3223} 3186}
3224 3187
3225void iwl3945_config_ap(struct iwl_priv *priv) 3188void iwl3945_config_ap(struct iwl_priv *priv)
@@ -3232,14 +3195,14 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3232 return; 3195 return;
3233 3196
3234 /* The following should be done only at AP bring up */ 3197 /* The following should be done only at AP bring up */
3235 if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) { 3198 if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
3236 3199
3237 /* RXON - unassoc (to set timing command) */ 3200 /* RXON - unassoc (to set timing command) */
3238 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3201 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3239 iwl3945_commit_rxon(priv, ctx); 3202 iwl3945_commit_rxon(priv, ctx);
3240 3203
3241 /* RXON Timing */ 3204 /* RXON Timing */
3242 rc = iwl_send_rxon_timing(priv, ctx); 3205 rc = iwl_legacy_send_rxon_timing(priv, ctx);
3243 if (rc) 3206 if (rc)
3244 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3207 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3245 "Attempting to continue.\n"); 3208 "Attempting to continue.\n");
@@ -3266,10 +3229,6 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3266 iwl3945_commit_rxon(priv, ctx); 3229 iwl3945_commit_rxon(priv, ctx);
3267 } 3230 }
3268 iwl3945_send_beacon_cmd(priv); 3231 iwl3945_send_beacon_cmd(priv);
3269
3270 /* FIXME - we need to add code here to detect a totally new
3271 * configuration, reset the AP, unassoc, rxon timing, assoc,
3272 * clear sta table, add BCAST sta... */
3273} 3232}
3274 3233
3275static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3234static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3289,17 +3248,25 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3289 return -EOPNOTSUPP; 3248 return -EOPNOTSUPP;
3290 } 3249 }
3291 3250
3292 static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS); 3251 /*
3252 * To support IBSS RSN, don't program group keys in IBSS, the
3253 * hardware will then not attempt to decrypt the frames.
3254 */
3255 if (vif->type == NL80211_IFTYPE_ADHOC &&
3256 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
3257 return -EOPNOTSUPP;
3258
3259 static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
3293 3260
3294 if (!static_key) { 3261 if (!static_key) {
3295 sta_id = iwl_sta_id_or_broadcast( 3262 sta_id = iwl_legacy_sta_id_or_broadcast(
3296 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); 3263 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
3297 if (sta_id == IWL_INVALID_STATION) 3264 if (sta_id == IWL_INVALID_STATION)
3298 return -EINVAL; 3265 return -EINVAL;
3299 } 3266 }
3300 3267
3301 mutex_lock(&priv->mutex); 3268 mutex_lock(&priv->mutex);
3302 iwl_scan_cancel_timeout(priv, 100); 3269 iwl_legacy_scan_cancel_timeout(priv, 100);
3303 3270
3304 switch (cmd) { 3271 switch (cmd) {
3305 case SET_KEY: 3272 case SET_KEY:
@@ -3344,7 +3311,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3344 sta_priv->common.sta_id = IWL_INVALID_STATION; 3311 sta_priv->common.sta_id = IWL_INVALID_STATION;
3345 3312
3346 3313
3347 ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS], 3314 ret = iwl_legacy_add_station_common(priv,
3315 &priv->contexts[IWL_RXON_CTX_BSS],
3348 sta->addr, is_ap, sta, &sta_id); 3316 sta->addr, is_ap, sta, &sta_id);
3349 if (ret) { 3317 if (ret) {
3350 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 3318 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
@@ -3405,7 +3373,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3405 3373
3406 /* 3374 /*
3407 * Receiving all multicast frames is always enabled by the 3375 * Receiving all multicast frames is always enabled by the
3408 * default flags setup in iwl_connection_init_rx_config() 3376 * default flags setup in iwl_legacy_connection_init_rx_config()
3409 * since we currently do not support programming multicast 3377 * since we currently do not support programming multicast
3410 * filters into the device. 3378 * filters into the device.
3411 */ 3379 */
@@ -3420,7 +3388,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3420 * 3388 *
3421 *****************************************************************************/ 3389 *****************************************************************************/
3422 3390
3423#ifdef CONFIG_IWLWIFI_DEBUG 3391#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3424 3392
3425/* 3393/*
3426 * The following adds a new attribute to the sysfs representation 3394 * The following adds a new attribute to the sysfs representation
@@ -3433,13 +3401,13 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3433 * level that is used instead of the global debug level if it (the per 3401 * level that is used instead of the global debug level if it (the per
3434 * device debug level) is set. 3402 * device debug level) is set.
3435 */ 3403 */
3436static ssize_t show_debug_level(struct device *d, 3404static ssize_t iwl3945_show_debug_level(struct device *d,
3437 struct device_attribute *attr, char *buf) 3405 struct device_attribute *attr, char *buf)
3438{ 3406{
3439 struct iwl_priv *priv = dev_get_drvdata(d); 3407 struct iwl_priv *priv = dev_get_drvdata(d);
3440 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); 3408 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
3441} 3409}
3442static ssize_t store_debug_level(struct device *d, 3410static ssize_t iwl3945_store_debug_level(struct device *d,
3443 struct device_attribute *attr, 3411 struct device_attribute *attr,
3444 const char *buf, size_t count) 3412 const char *buf, size_t count)
3445{ 3413{
@@ -3452,7 +3420,7 @@ static ssize_t store_debug_level(struct device *d,
3452 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); 3420 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3453 else { 3421 else {
3454 priv->debug_level = val; 3422 priv->debug_level = val;
3455 if (iwl_alloc_traffic_mem(priv)) 3423 if (iwl_legacy_alloc_traffic_mem(priv))
3456 IWL_ERR(priv, 3424 IWL_ERR(priv,
3457 "Not enough memory to generate traffic log\n"); 3425 "Not enough memory to generate traffic log\n");
3458 } 3426 }
@@ -3460,31 +3428,31 @@ static ssize_t store_debug_level(struct device *d,
3460} 3428}
3461 3429
3462static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, 3430static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3463 show_debug_level, store_debug_level); 3431 iwl3945_show_debug_level, iwl3945_store_debug_level);
3464 3432
3465#endif /* CONFIG_IWLWIFI_DEBUG */ 3433#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
3466 3434
3467static ssize_t show_temperature(struct device *d, 3435static ssize_t iwl3945_show_temperature(struct device *d,
3468 struct device_attribute *attr, char *buf) 3436 struct device_attribute *attr, char *buf)
3469{ 3437{
3470 struct iwl_priv *priv = dev_get_drvdata(d); 3438 struct iwl_priv *priv = dev_get_drvdata(d);
3471 3439
3472 if (!iwl_is_alive(priv)) 3440 if (!iwl_legacy_is_alive(priv))
3473 return -EAGAIN; 3441 return -EAGAIN;
3474 3442
3475 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); 3443 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3476} 3444}
3477 3445
3478static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 3446static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3479 3447
3480static ssize_t show_tx_power(struct device *d, 3448static ssize_t iwl3945_show_tx_power(struct device *d,
3481 struct device_attribute *attr, char *buf) 3449 struct device_attribute *attr, char *buf)
3482{ 3450{
3483 struct iwl_priv *priv = dev_get_drvdata(d); 3451 struct iwl_priv *priv = dev_get_drvdata(d);
3484 return sprintf(buf, "%d\n", priv->tx_power_user_lmt); 3452 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3485} 3453}
3486 3454
3487static ssize_t store_tx_power(struct device *d, 3455static ssize_t iwl3945_store_tx_power(struct device *d,
3488 struct device_attribute *attr, 3456 struct device_attribute *attr,
3489 const char *buf, size_t count) 3457 const char *buf, size_t count)
3490{ 3458{
@@ -3501,9 +3469,9 @@ static ssize_t store_tx_power(struct device *d,
3501 return count; 3469 return count;
3502} 3470}
3503 3471
3504static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 3472static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
3505 3473
3506static ssize_t show_flags(struct device *d, 3474static ssize_t iwl3945_show_flags(struct device *d,
3507 struct device_attribute *attr, char *buf) 3475 struct device_attribute *attr, char *buf)
3508{ 3476{
3509 struct iwl_priv *priv = dev_get_drvdata(d); 3477 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3512,7 +3480,7 @@ static ssize_t show_flags(struct device *d,
3512 return sprintf(buf, "0x%04X\n", ctx->active.flags); 3480 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3513} 3481}
3514 3482
3515static ssize_t store_flags(struct device *d, 3483static ssize_t iwl3945_store_flags(struct device *d,
3516 struct device_attribute *attr, 3484 struct device_attribute *attr,
3517 const char *buf, size_t count) 3485 const char *buf, size_t count)
3518{ 3486{
@@ -3523,7 +3491,7 @@ static ssize_t store_flags(struct device *d,
3523 mutex_lock(&priv->mutex); 3491 mutex_lock(&priv->mutex);
3524 if (le32_to_cpu(ctx->staging.flags) != flags) { 3492 if (le32_to_cpu(ctx->staging.flags) != flags) {
3525 /* Cancel any currently running scans... */ 3493 /* Cancel any currently running scans... */
3526 if (iwl_scan_cancel_timeout(priv, 100)) 3494 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3527 IWL_WARN(priv, "Could not cancel scan.\n"); 3495 IWL_WARN(priv, "Could not cancel scan.\n");
3528 else { 3496 else {
3529 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", 3497 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
@@ -3537,9 +3505,9 @@ static ssize_t store_flags(struct device *d,
3537 return count; 3505 return count;
3538} 3506}
3539 3507
3540static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); 3508static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3541 3509
3542static ssize_t show_filter_flags(struct device *d, 3510static ssize_t iwl3945_show_filter_flags(struct device *d,
3543 struct device_attribute *attr, char *buf) 3511 struct device_attribute *attr, char *buf)
3544{ 3512{
3545 struct iwl_priv *priv = dev_get_drvdata(d); 3513 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3549,7 +3517,7 @@ static ssize_t show_filter_flags(struct device *d,
3549 le32_to_cpu(ctx->active.filter_flags)); 3517 le32_to_cpu(ctx->active.filter_flags));
3550} 3518}
3551 3519
3552static ssize_t store_filter_flags(struct device *d, 3520static ssize_t iwl3945_store_filter_flags(struct device *d,
3553 struct device_attribute *attr, 3521 struct device_attribute *attr,
3554 const char *buf, size_t count) 3522 const char *buf, size_t count)
3555{ 3523{
@@ -3560,7 +3528,7 @@ static ssize_t store_filter_flags(struct device *d,
3560 mutex_lock(&priv->mutex); 3528 mutex_lock(&priv->mutex);
3561 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { 3529 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3562 /* Cancel any currently running scans... */ 3530 /* Cancel any currently running scans... */
3563 if (iwl_scan_cancel_timeout(priv, 100)) 3531 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3564 IWL_WARN(priv, "Could not cancel scan.\n"); 3532 IWL_WARN(priv, "Could not cancel scan.\n");
3565 else { 3533 else {
3566 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " 3534 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
@@ -3575,10 +3543,10 @@ static ssize_t store_filter_flags(struct device *d,
3575 return count; 3543 return count;
3576} 3544}
3577 3545
3578static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3546static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
3579 store_filter_flags); 3547 iwl3945_store_filter_flags);
3580 3548
3581static ssize_t show_measurement(struct device *d, 3549static ssize_t iwl3945_show_measurement(struct device *d,
3582 struct device_attribute *attr, char *buf) 3550 struct device_attribute *attr, char *buf)
3583{ 3551{
3584 struct iwl_priv *priv = dev_get_drvdata(d); 3552 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3610,7 +3578,7 @@ static ssize_t show_measurement(struct device *d,
3610 return len; 3578 return len;
3611} 3579}
3612 3580
3613static ssize_t store_measurement(struct device *d, 3581static ssize_t iwl3945_store_measurement(struct device *d,
3614 struct device_attribute *attr, 3582 struct device_attribute *attr,
3615 const char *buf, size_t count) 3583 const char *buf, size_t count)
3616{ 3584{
@@ -3647,9 +3615,9 @@ static ssize_t store_measurement(struct device *d,
3647} 3615}
3648 3616
3649static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 3617static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3650 show_measurement, store_measurement); 3618 iwl3945_show_measurement, iwl3945_store_measurement);
3651 3619
3652static ssize_t store_retry_rate(struct device *d, 3620static ssize_t iwl3945_store_retry_rate(struct device *d,
3653 struct device_attribute *attr, 3621 struct device_attribute *attr,
3654 const char *buf, size_t count) 3622 const char *buf, size_t count)
3655{ 3623{
@@ -3662,38 +3630,38 @@ static ssize_t store_retry_rate(struct device *d,
3662 return count; 3630 return count;
3663} 3631}
3664 3632
3665static ssize_t show_retry_rate(struct device *d, 3633static ssize_t iwl3945_show_retry_rate(struct device *d,
3666 struct device_attribute *attr, char *buf) 3634 struct device_attribute *attr, char *buf)
3667{ 3635{
3668 struct iwl_priv *priv = dev_get_drvdata(d); 3636 struct iwl_priv *priv = dev_get_drvdata(d);
3669 return sprintf(buf, "%d", priv->retry_rate); 3637 return sprintf(buf, "%d", priv->retry_rate);
3670} 3638}
3671 3639
3672static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, 3640static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3673 store_retry_rate); 3641 iwl3945_store_retry_rate);
3674 3642
3675 3643
3676static ssize_t show_channels(struct device *d, 3644static ssize_t iwl3945_show_channels(struct device *d,
3677 struct device_attribute *attr, char *buf) 3645 struct device_attribute *attr, char *buf)
3678{ 3646{
3679 /* all this shit doesn't belong into sysfs anyway */ 3647 /* all this shit doesn't belong into sysfs anyway */
3680 return 0; 3648 return 0;
3681} 3649}
3682 3650
3683static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 3651static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3684 3652
3685static ssize_t show_antenna(struct device *d, 3653static ssize_t iwl3945_show_antenna(struct device *d,
3686 struct device_attribute *attr, char *buf) 3654 struct device_attribute *attr, char *buf)
3687{ 3655{
3688 struct iwl_priv *priv = dev_get_drvdata(d); 3656 struct iwl_priv *priv = dev_get_drvdata(d);
3689 3657
3690 if (!iwl_is_alive(priv)) 3658 if (!iwl_legacy_is_alive(priv))
3691 return -EAGAIN; 3659 return -EAGAIN;
3692 3660
3693 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); 3661 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3694} 3662}
3695 3663
3696static ssize_t store_antenna(struct device *d, 3664static ssize_t iwl3945_store_antenna(struct device *d,
3697 struct device_attribute *attr, 3665 struct device_attribute *attr,
3698 const char *buf, size_t count) 3666 const char *buf, size_t count)
3699{ 3667{
@@ -3718,20 +3686,20 @@ static ssize_t store_antenna(struct device *d,
3718 return count; 3686 return count;
3719} 3687}
3720 3688
3721static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); 3689static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3722 3690
3723static ssize_t show_status(struct device *d, 3691static ssize_t iwl3945_show_status(struct device *d,
3724 struct device_attribute *attr, char *buf) 3692 struct device_attribute *attr, char *buf)
3725{ 3693{
3726 struct iwl_priv *priv = dev_get_drvdata(d); 3694 struct iwl_priv *priv = dev_get_drvdata(d);
3727 if (!iwl_is_alive(priv)) 3695 if (!iwl_legacy_is_alive(priv))
3728 return -EAGAIN; 3696 return -EAGAIN;
3729 return sprintf(buf, "0x%08x\n", (int)priv->status); 3697 return sprintf(buf, "0x%08x\n", (int)priv->status);
3730} 3698}
3731 3699
3732static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 3700static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3733 3701
3734static ssize_t dump_error_log(struct device *d, 3702static ssize_t iwl3945_dump_error_log(struct device *d,
3735 struct device_attribute *attr, 3703 struct device_attribute *attr,
3736 const char *buf, size_t count) 3704 const char *buf, size_t count)
3737{ 3705{
@@ -3744,7 +3712,7 @@ static ssize_t dump_error_log(struct device *d,
3744 return strnlen(buf, count); 3712 return strnlen(buf, count);
3745} 3713}
3746 3714
3747static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); 3715static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3748 3716
3749/***************************************************************************** 3717/*****************************************************************************
3750 * 3718 *
@@ -3760,18 +3728,17 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3760 3728
3761 INIT_WORK(&priv->restart, iwl3945_bg_restart); 3729 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3762 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); 3730 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3763 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
3764 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3731 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3765 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3732 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3766 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); 3733 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3767 3734
3768 iwl_setup_scan_deferred_work(priv); 3735 iwl_legacy_setup_scan_deferred_work(priv);
3769 3736
3770 iwl3945_hw_setup_deferred_work(priv); 3737 iwl3945_hw_setup_deferred_work(priv);
3771 3738
3772 init_timer(&priv->watchdog); 3739 init_timer(&priv->watchdog);
3773 priv->watchdog.data = (unsigned long)priv; 3740 priv->watchdog.data = (unsigned long)priv;
3774 priv->watchdog.function = iwl_bg_watchdog; 3741 priv->watchdog.function = iwl_legacy_bg_watchdog;
3775 3742
3776 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3743 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3777 iwl3945_irq_tasklet, (unsigned long)priv); 3744 iwl3945_irq_tasklet, (unsigned long)priv);
@@ -3783,9 +3750,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3783 3750
3784 cancel_delayed_work_sync(&priv->init_alive_start); 3751 cancel_delayed_work_sync(&priv->init_alive_start);
3785 cancel_delayed_work(&priv->alive_start); 3752 cancel_delayed_work(&priv->alive_start);
3786 cancel_work_sync(&priv->beacon_update);
3787 3753
3788 iwl_cancel_scan_deferred_work(priv); 3754 iwl_legacy_cancel_scan_deferred_work(priv);
3789} 3755}
3790 3756
3791static struct attribute *iwl3945_sysfs_entries[] = { 3757static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3799,7 +3765,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3799 &dev_attr_status.attr, 3765 &dev_attr_status.attr,
3800 &dev_attr_temperature.attr, 3766 &dev_attr_temperature.attr,
3801 &dev_attr_tx_power.attr, 3767 &dev_attr_tx_power.attr,
3802#ifdef CONFIG_IWLWIFI_DEBUG 3768#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3803 &dev_attr_debug_level.attr, 3769 &dev_attr_debug_level.attr,
3804#endif 3770#endif
3805 NULL 3771 NULL
@@ -3814,19 +3780,19 @@ struct ieee80211_ops iwl3945_hw_ops = {
3814 .tx = iwl3945_mac_tx, 3780 .tx = iwl3945_mac_tx,
3815 .start = iwl3945_mac_start, 3781 .start = iwl3945_mac_start,
3816 .stop = iwl3945_mac_stop, 3782 .stop = iwl3945_mac_stop,
3817 .add_interface = iwl_mac_add_interface, 3783 .add_interface = iwl_legacy_mac_add_interface,
3818 .remove_interface = iwl_mac_remove_interface, 3784 .remove_interface = iwl_legacy_mac_remove_interface,
3819 .change_interface = iwl_mac_change_interface, 3785 .change_interface = iwl_legacy_mac_change_interface,
3820 .config = iwl_legacy_mac_config, 3786 .config = iwl_legacy_mac_config,
3821 .configure_filter = iwl3945_configure_filter, 3787 .configure_filter = iwl3945_configure_filter,
3822 .set_key = iwl3945_mac_set_key, 3788 .set_key = iwl3945_mac_set_key,
3823 .conf_tx = iwl_mac_conf_tx, 3789 .conf_tx = iwl_legacy_mac_conf_tx,
3824 .reset_tsf = iwl_legacy_mac_reset_tsf, 3790 .reset_tsf = iwl_legacy_mac_reset_tsf,
3825 .bss_info_changed = iwl_legacy_mac_bss_info_changed, 3791 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
3826 .hw_scan = iwl_mac_hw_scan, 3792 .hw_scan = iwl_legacy_mac_hw_scan,
3827 .sta_add = iwl3945_mac_sta_add, 3793 .sta_add = iwl3945_mac_sta_add,
3828 .sta_remove = iwl_mac_sta_remove, 3794 .sta_remove = iwl_legacy_mac_sta_remove,
3829 .tx_last_beacon = iwl_mac_tx_last_beacon, 3795 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
3830}; 3796};
3831 3797
3832static int iwl3945_init_drv(struct iwl_priv *priv) 3798static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3868,7 +3834,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3868 ret = -EINVAL; 3834 ret = -EINVAL;
3869 goto err; 3835 goto err;
3870 } 3836 }
3871 ret = iwl_init_channel_map(priv); 3837 ret = iwl_legacy_init_channel_map(priv);
3872 if (ret) { 3838 if (ret) {
3873 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); 3839 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3874 goto err; 3840 goto err;
@@ -3880,7 +3846,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3880 goto err_free_channel_map; 3846 goto err_free_channel_map;
3881 } 3847 }
3882 3848
3883 ret = iwlcore_init_geos(priv); 3849 ret = iwl_legacy_init_geos(priv);
3884 if (ret) { 3850 if (ret) {
3885 IWL_ERR(priv, "initializing geos failed: %d\n", ret); 3851 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3886 goto err_free_channel_map; 3852 goto err_free_channel_map;
@@ -3890,7 +3856,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3890 return 0; 3856 return 0;
3891 3857
3892err_free_channel_map: 3858err_free_channel_map:
3893 iwl_free_channel_map(priv); 3859 iwl_legacy_free_channel_map(priv);
3894err: 3860err:
3895 return ret; 3861 return ret;
3896} 3862}
@@ -3910,15 +3876,12 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3910 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3876 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3911 IEEE80211_HW_SPECTRUM_MGMT; 3877 IEEE80211_HW_SPECTRUM_MGMT;
3912 3878
3913 if (!priv->cfg->base_params->broken_powersave)
3914 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3915 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3916
3917 hw->wiphy->interface_modes = 3879 hw->wiphy->interface_modes =
3918 priv->contexts[IWL_RXON_CTX_BSS].interface_modes; 3880 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3919 3881
3920 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3882 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3921 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3883 WIPHY_FLAG_DISABLE_BEACON_HINTS |
3884 WIPHY_FLAG_IBSS_RSN;
3922 3885
3923 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3886 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3924 /* we create the 802.11 header and a zero-length SSID element */ 3887 /* we create the 802.11 header and a zero-length SSID element */
@@ -3935,6 +3898,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3935 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 3898 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3936 &priv->bands[IEEE80211_BAND_5GHZ]; 3899 &priv->bands[IEEE80211_BAND_5GHZ];
3937 3900
3901 iwl_legacy_leds_init(priv);
3902
3938 ret = ieee80211_register_hw(priv->hw); 3903 ret = ieee80211_register_hw(priv->hw);
3939 if (ret) { 3904 if (ret) {
3940 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 3905 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3960,7 +3925,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3960 3925
3961 /* mac80211 allocates memory for this device instance, including 3926 /* mac80211 allocates memory for this device instance, including
3962 * space for this driver's private structure */ 3927 * space for this driver's private structure */
3963 hw = iwl_alloc_all(cfg); 3928 hw = iwl_legacy_alloc_all(cfg);
3964 if (hw == NULL) { 3929 if (hw == NULL) {
3965 pr_err("Can not allocate network device\n"); 3930 pr_err("Can not allocate network device\n");
3966 err = -ENOMEM; 3931 err = -ENOMEM;
@@ -4000,13 +3965,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4000 iwl3945_hw_ops.hw_scan = NULL; 3965 iwl3945_hw_ops.hw_scan = NULL;
4001 } 3966 }
4002 3967
4003
4004 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 3968 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
4005 priv->cfg = cfg; 3969 priv->cfg = cfg;
4006 priv->pci_dev = pdev; 3970 priv->pci_dev = pdev;
4007 priv->inta_mask = CSR_INI_SET_MASK; 3971 priv->inta_mask = CSR_INI_SET_MASK;
4008 3972
4009 if (iwl_alloc_traffic_mem(priv)) 3973 if (iwl_legacy_alloc_traffic_mem(priv))
4010 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 3974 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
4011 3975
4012 /*************************** 3976 /***************************
@@ -4070,7 +4034,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4070 * ********************/ 4034 * ********************/
4071 4035
4072 /* Read the EEPROM */ 4036 /* Read the EEPROM */
4073 err = iwl_eeprom_init(priv); 4037 err = iwl_legacy_eeprom_init(priv);
4074 if (err) { 4038 if (err) {
4075 IWL_ERR(priv, "Unable to init EEPROM\n"); 4039 IWL_ERR(priv, "Unable to init EEPROM\n");
4076 goto out_iounmap; 4040 goto out_iounmap;
@@ -4107,12 +4071,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4107 * ********************/ 4071 * ********************/
4108 4072
4109 spin_lock_irqsave(&priv->lock, flags); 4073 spin_lock_irqsave(&priv->lock, flags);
4110 iwl_disable_interrupts(priv); 4074 iwl_legacy_disable_interrupts(priv);
4111 spin_unlock_irqrestore(&priv->lock, flags); 4075 spin_unlock_irqrestore(&priv->lock, flags);
4112 4076
4113 pci_enable_msi(priv->pci_dev); 4077 pci_enable_msi(priv->pci_dev);
4114 4078
4115 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr, 4079 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
4116 IRQF_SHARED, DRV_NAME, priv); 4080 IRQF_SHARED, DRV_NAME, priv);
4117 if (err) { 4081 if (err) {
4118 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 4082 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4125,24 +4089,24 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4125 goto out_release_irq; 4089 goto out_release_irq;
4126 } 4090 }
4127 4091
4128 iwl_set_rxon_channel(priv, 4092 iwl_legacy_set_rxon_channel(priv,
4129 &priv->bands[IEEE80211_BAND_2GHZ].channels[5], 4093 &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
4130 &priv->contexts[IWL_RXON_CTX_BSS]); 4094 &priv->contexts[IWL_RXON_CTX_BSS]);
4131 iwl3945_setup_deferred_work(priv); 4095 iwl3945_setup_deferred_work(priv);
4132 iwl3945_setup_rx_handlers(priv); 4096 iwl3945_setup_rx_handlers(priv);
4133 iwl_power_initialize(priv); 4097 iwl_legacy_power_initialize(priv);
4134 4098
4135 /********************************* 4099 /*********************************
4136 * 8. Setup and Register mac80211 4100 * 8. Setup and Register mac80211
4137 * *******************************/ 4101 * *******************************/
4138 4102
4139 iwl_enable_interrupts(priv); 4103 iwl_legacy_enable_interrupts(priv);
4140 4104
4141 err = iwl3945_setup_mac(priv); 4105 err = iwl3945_setup_mac(priv);
4142 if (err) 4106 if (err)
4143 goto out_remove_sysfs; 4107 goto out_remove_sysfs;
4144 4108
4145 err = iwl_dbgfs_register(priv, DRV_NAME); 4109 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
4146 if (err) 4110 if (err)
4147 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); 4111 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
4148 4112
@@ -4160,12 +4124,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4160 free_irq(priv->pci_dev->irq, priv); 4124 free_irq(priv->pci_dev->irq, priv);
4161 out_disable_msi: 4125 out_disable_msi:
4162 pci_disable_msi(priv->pci_dev); 4126 pci_disable_msi(priv->pci_dev);
4163 iwlcore_free_geos(priv); 4127 iwl_legacy_free_geos(priv);
4164 iwl_free_channel_map(priv); 4128 iwl_legacy_free_channel_map(priv);
4165 out_unset_hw_params: 4129 out_unset_hw_params:
4166 iwl3945_unset_hw_params(priv); 4130 iwl3945_unset_hw_params(priv);
4167 out_eeprom_free: 4131 out_eeprom_free:
4168 iwl_eeprom_free(priv); 4132 iwl_legacy_eeprom_free(priv);
4169 out_iounmap: 4133 out_iounmap:
4170 pci_iounmap(pdev, priv->hw_base); 4134 pci_iounmap(pdev, priv->hw_base);
4171 out_pci_release_regions: 4135 out_pci_release_regions:
@@ -4174,7 +4138,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4174 pci_set_drvdata(pdev, NULL); 4138 pci_set_drvdata(pdev, NULL);
4175 pci_disable_device(pdev); 4139 pci_disable_device(pdev);
4176 out_ieee80211_free_hw: 4140 out_ieee80211_free_hw:
4177 iwl_free_traffic_mem(priv); 4141 iwl_legacy_free_traffic_mem(priv);
4178 ieee80211_free_hw(priv->hw); 4142 ieee80211_free_hw(priv->hw);
4179 out: 4143 out:
4180 return err; 4144 return err;
@@ -4190,10 +4154,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4190 4154
4191 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 4155 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
4192 4156
4193 iwl_dbgfs_unregister(priv); 4157 iwl_legacy_dbgfs_unregister(priv);
4194 4158
4195 set_bit(STATUS_EXIT_PENDING, &priv->status); 4159 set_bit(STATUS_EXIT_PENDING, &priv->status);
4196 4160
4161 iwl_legacy_leds_exit(priv);
4162
4197 if (priv->mac80211_registered) { 4163 if (priv->mac80211_registered) {
4198 ieee80211_unregister_hw(priv->hw); 4164 ieee80211_unregister_hw(priv->hw);
4199 priv->mac80211_registered = 0; 4165 priv->mac80211_registered = 0;
@@ -4208,16 +4174,16 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4208 * paths to avoid running iwl_down() at all before leaving driver. 4174 * paths to avoid running iwl_down() at all before leaving driver.
4209 * This (inexpensive) call *makes sure* device is reset. 4175 * This (inexpensive) call *makes sure* device is reset.
4210 */ 4176 */
4211 iwl_apm_stop(priv); 4177 iwl_legacy_apm_stop(priv);
4212 4178
4213 /* make sure we flush any pending irq or 4179 /* make sure we flush any pending irq or
4214 * tasklet for the driver 4180 * tasklet for the driver
4215 */ 4181 */
4216 spin_lock_irqsave(&priv->lock, flags); 4182 spin_lock_irqsave(&priv->lock, flags);
4217 iwl_disable_interrupts(priv); 4183 iwl_legacy_disable_interrupts(priv);
4218 spin_unlock_irqrestore(&priv->lock, flags); 4184 spin_unlock_irqrestore(&priv->lock, flags);
4219 4185
4220 iwl_synchronize_irq(priv); 4186 iwl3945_synchronize_irq(priv);
4221 4187
4222 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4188 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4223 4189
@@ -4239,7 +4205,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4239 * until now... */ 4205 * until now... */
4240 destroy_workqueue(priv->workqueue); 4206 destroy_workqueue(priv->workqueue);
4241 priv->workqueue = NULL; 4207 priv->workqueue = NULL;
4242 iwl_free_traffic_mem(priv); 4208 iwl_legacy_free_traffic_mem(priv);
4243 4209
4244 free_irq(pdev->irq, priv); 4210 free_irq(pdev->irq, priv);
4245 pci_disable_msi(pdev); 4211 pci_disable_msi(pdev);
@@ -4249,8 +4215,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4249 pci_disable_device(pdev); 4215 pci_disable_device(pdev);
4250 pci_set_drvdata(pdev, NULL); 4216 pci_set_drvdata(pdev, NULL);
4251 4217
4252 iwl_free_channel_map(priv); 4218 iwl_legacy_free_channel_map(priv);
4253 iwlcore_free_geos(priv); 4219 iwl_legacy_free_geos(priv);
4254 kfree(priv->scan_cmd); 4220 kfree(priv->scan_cmd);
4255 if (priv->beacon_skb) 4221 if (priv->beacon_skb)
4256 dev_kfree_skb(priv->beacon_skb); 4222 dev_kfree_skb(priv->beacon_skb);
@@ -4270,7 +4236,7 @@ static struct pci_driver iwl3945_driver = {
4270 .id_table = iwl3945_hw_card_ids, 4236 .id_table = iwl3945_hw_card_ids,
4271 .probe = iwl3945_pci_probe, 4237 .probe = iwl3945_pci_probe,
4272 .remove = __devexit_p(iwl3945_pci_remove), 4238 .remove = __devexit_p(iwl3945_pci_remove),
4273 .driver.pm = IWL_PM_OPS, 4239 .driver.pm = IWL_LEGACY_PM_OPS,
4274}; 4240};
4275 4241
4276static int __init iwl3945_init(void) 4242static int __init iwl3945_init(void)
@@ -4311,17 +4277,17 @@ module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4311MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 4277MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4312module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); 4278module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4313MODULE_PARM_DESC(swcrypto, 4279MODULE_PARM_DESC(swcrypto,
4314 "using software crypto (default 1 [software])\n"); 4280 "using software crypto (default 1 [software])");
4315#ifdef CONFIG_IWLWIFI_DEBUG
4316module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
4317MODULE_PARM_DESC(debug, "debug output mask");
4318#endif
4319module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, 4281module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4320 int, S_IRUGO); 4282 int, S_IRUGO);
4321MODULE_PARM_DESC(disable_hw_scan, 4283MODULE_PARM_DESC(disable_hw_scan,
4322 "disable hardware scanning (default 0) (deprecated)"); 4284 "disable hardware scanning (default 0) (deprecated)");
4323module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO); 4285#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
4324MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); 4286module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
4287MODULE_PARM_DESC(debug, "debug output mask");
4288#endif
4289module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4290MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4325 4291
4326module_exit(iwl3945_exit); 4292module_exit(iwl3945_exit);
4327module_init(iwl3945_init); 4293module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644
index 000000000000..91b3d8b9d7a5
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -0,0 +1,3632 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/wireless.h>
44#include <linux/firmware.h>
45#include <linux/etherdevice.h>
46#include <linux/if_arp.h>
47
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl4965"
53
54#include "iwl-eeprom.h"
55#include "iwl-dev.h"
56#include "iwl-core.h"
57#include "iwl-io.h"
58#include "iwl-helpers.h"
59#include "iwl-sta.h"
60#include "iwl-4965-calib.h"
61#include "iwl-4965.h"
62#include "iwl-4965-led.h"
63
64
65/******************************************************************************
66 *
67 * module boiler plate
68 *
69 ******************************************************************************/
70
71/*
72 * module name, copyright, version, etc.
73 */
74#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
75
76#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
77#define VD "d"
78#else
79#define VD
80#endif
81
82#define DRV_VERSION IWLWIFI_VERSION VD
83
84
85MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88MODULE_LICENSE("GPL");
89MODULE_ALIAS("iwl4965");
90
91void iwl4965_update_chain_flags(struct iwl_priv *priv)
92{
93 struct iwl_rxon_context *ctx;
94
95 if (priv->cfg->ops->hcmd->set_rxon_chain) {
96 for_each_context(priv, ctx) {
97 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
98 if (ctx->active.rx_chain != ctx->staging.rx_chain)
99 iwl_legacy_commit_rxon(priv, ctx);
100 }
101 }
102}
103
104static void iwl4965_clear_free_frames(struct iwl_priv *priv)
105{
106 struct list_head *element;
107
108 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
109 priv->frames_count);
110
111 while (!list_empty(&priv->free_frames)) {
112 element = priv->free_frames.next;
113 list_del(element);
114 kfree(list_entry(element, struct iwl_frame, list));
115 priv->frames_count--;
116 }
117
118 if (priv->frames_count) {
119 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
120 priv->frames_count);
121 priv->frames_count = 0;
122 }
123}
124
125static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
126{
127 struct iwl_frame *frame;
128 struct list_head *element;
129 if (list_empty(&priv->free_frames)) {
130 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
131 if (!frame) {
132 IWL_ERR(priv, "Could not allocate frame!\n");
133 return NULL;
134 }
135
136 priv->frames_count++;
137 return frame;
138 }
139
140 element = priv->free_frames.next;
141 list_del(element);
142 return list_entry(element, struct iwl_frame, list);
143}
144
145static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
146{
147 memset(frame, 0, sizeof(*frame));
148 list_add(&frame->list, &priv->free_frames);
149}
150
151static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
152 struct ieee80211_hdr *hdr,
153 int left)
154{
155 lockdep_assert_held(&priv->mutex);
156
157 if (!priv->beacon_skb)
158 return 0;
159
160 if (priv->beacon_skb->len > left)
161 return 0;
162
163 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
164
165 return priv->beacon_skb->len;
166}
167
168/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
169static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
170 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
171 u8 *beacon, u32 frame_size)
172{
173 u16 tim_idx;
174 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
175
176 /*
177 * The index is relative to frame start but we start looking at the
178 * variable-length part of the beacon.
179 */
180 tim_idx = mgmt->u.beacon.variable - beacon;
181
182 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
183 while ((tim_idx < (frame_size - 2)) &&
184 (beacon[tim_idx] != WLAN_EID_TIM))
185 tim_idx += beacon[tim_idx+1] + 2;
186
187 /* If TIM field was found, set variables */
188 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
189 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
190 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
191 } else
192 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
193}
194
195static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
196 struct iwl_frame *frame)
197{
198 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
199 u32 frame_size;
200 u32 rate_flags;
201 u32 rate;
202 /*
203 * We have to set up the TX command, the TX Beacon command, and the
204 * beacon contents.
205 */
206
207 lockdep_assert_held(&priv->mutex);
208
209 if (!priv->beacon_ctx) {
210 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
211 return 0;
212 }
213
214 /* Initialize memory */
215 tx_beacon_cmd = &frame->u.beacon;
216 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
217
218 /* Set up TX beacon contents */
219 frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
220 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
221 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
222 return 0;
223 if (!frame_size)
224 return 0;
225
226 /* Set up TX command fields */
227 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
228 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
229 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
230 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
231 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
232
233 /* Set up TX beacon command fields */
234 iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
235 frame_size);
236
237 /* Set up packet rate and flags */
238 rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
239 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
240 priv->hw_params.valid_tx_ant);
241 rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
242 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
243 rate_flags |= RATE_MCS_CCK_MSK;
244 tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
245 rate_flags);
246
247 return sizeof(*tx_beacon_cmd) + frame_size;
248}
249
250int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
251{
252 struct iwl_frame *frame;
253 unsigned int frame_size;
254 int rc;
255
256 frame = iwl4965_get_free_frame(priv);
257 if (!frame) {
258 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
259 "command.\n");
260 return -ENOMEM;
261 }
262
263 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
264 if (!frame_size) {
265 IWL_ERR(priv, "Error configuring the beacon command\n");
266 iwl4965_free_frame(priv, frame);
267 return -EINVAL;
268 }
269
270 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
271 &frame->u.cmd[0]);
272
273 iwl4965_free_frame(priv, frame);
274
275 return rc;
276}
277
278static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
279{
280 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
281
282 dma_addr_t addr = get_unaligned_le32(&tb->lo);
283 if (sizeof(dma_addr_t) > sizeof(u32))
284 addr |=
285 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
286
287 return addr;
288}
289
290static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
291{
292 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
293
294 return le16_to_cpu(tb->hi_n_len) >> 4;
295}
296
297static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
298 dma_addr_t addr, u16 len)
299{
300 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
301 u16 hi_n_len = len << 4;
302
303 put_unaligned_le32(addr, &tb->lo);
304 if (sizeof(dma_addr_t) > sizeof(u32))
305 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
306
307 tb->hi_n_len = cpu_to_le16(hi_n_len);
308
309 tfd->num_tbs = idx + 1;
310}
311
312static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
313{
314 return tfd->num_tbs & 0x1f;
315}
316
317/**
318 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
319 * @priv - driver private data
320 * @txq - tx queue
321 *
322 * Does NOT advance any TFD circular buffer read/write indexes
323 * Does NOT free the TFD itself (which is within circular buffer)
324 */
325void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
326{
327 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
328 struct iwl_tfd *tfd;
329 struct pci_dev *dev = priv->pci_dev;
330 int index = txq->q.read_ptr;
331 int i;
332 int num_tbs;
333
334 tfd = &tfd_tmp[index];
335
336 /* Sanity check on number of chunks */
337 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
338
339 if (num_tbs >= IWL_NUM_OF_TBS) {
340 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
341 /* @todo issue fatal error, it is quite serious situation */
342 return;
343 }
344
345 /* Unmap tx_cmd */
346 if (num_tbs)
347 pci_unmap_single(dev,
348 dma_unmap_addr(&txq->meta[index], mapping),
349 dma_unmap_len(&txq->meta[index], len),
350 PCI_DMA_BIDIRECTIONAL);
351
352 /* Unmap chunks, if any. */
353 for (i = 1; i < num_tbs; i++)
354 pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
355 iwl4965_tfd_tb_get_len(tfd, i),
356 PCI_DMA_TODEVICE);
357
358 /* free SKB */
359 if (txq->txb) {
360 struct sk_buff *skb;
361
362 skb = txq->txb[txq->q.read_ptr].skb;
363
364 /* can be called from irqs-disabled context */
365 if (skb) {
366 dev_kfree_skb_any(skb);
367 txq->txb[txq->q.read_ptr].skb = NULL;
368 }
369 }
370}
371
372int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
373 struct iwl_tx_queue *txq,
374 dma_addr_t addr, u16 len,
375 u8 reset, u8 pad)
376{
377 struct iwl_queue *q;
378 struct iwl_tfd *tfd, *tfd_tmp;
379 u32 num_tbs;
380
381 q = &txq->q;
382 tfd_tmp = (struct iwl_tfd *)txq->tfds;
383 tfd = &tfd_tmp[q->write_ptr];
384
385 if (reset)
386 memset(tfd, 0, sizeof(*tfd));
387
388 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
389
390 /* Each TFD can point to a maximum 20 Tx buffers */
391 if (num_tbs >= IWL_NUM_OF_TBS) {
392 IWL_ERR(priv, "Error can not send more than %d chunks\n",
393 IWL_NUM_OF_TBS);
394 return -EINVAL;
395 }
396
397 BUG_ON(addr & ~DMA_BIT_MASK(36));
398 if (unlikely(addr & ~IWL_TX_DMA_MASK))
399 IWL_ERR(priv, "Unaligned address = %llx\n",
400 (unsigned long long)addr);
401
402 iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
403
404 return 0;
405}
406
407/*
408 * Tell nic where to find circular buffer of Tx Frame Descriptors for
409 * given Tx queue, and enable the DMA channel used for that queue.
410 *
411 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
412 * channels supported in hardware.
413 */
414int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
415 struct iwl_tx_queue *txq)
416{
417 int txq_id = txq->q.id;
418
419 /* Circular buffer (TFD queue in DRAM) physical base address */
420 iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
421 txq->q.dma_addr >> 8);
422
423 return 0;
424}
425
426/******************************************************************************
427 *
428 * Generic RX handler implementations
429 *
430 ******************************************************************************/
431static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
432 struct iwl_rx_mem_buffer *rxb)
433{
434 struct iwl_rx_packet *pkt = rxb_addr(rxb);
435 struct iwl_alive_resp *palive;
436 struct delayed_work *pwork;
437
438 palive = &pkt->u.alive_frame;
439
440 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
441 "0x%01X 0x%01X\n",
442 palive->is_valid, palive->ver_type,
443 palive->ver_subtype);
444
445 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
446 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
447 memcpy(&priv->card_alive_init,
448 &pkt->u.alive_frame,
449 sizeof(struct iwl_init_alive_resp));
450 pwork = &priv->init_alive_start;
451 } else {
452 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
453 memcpy(&priv->card_alive, &pkt->u.alive_frame,
454 sizeof(struct iwl_alive_resp));
455 pwork = &priv->alive_start;
456 }
457
458 /* We delay the ALIVE response by 5ms to
459 * give the HW RF Kill time to activate... */
460 if (palive->is_valid == UCODE_VALID_OK)
461 queue_delayed_work(priv->workqueue, pwork,
462 msecs_to_jiffies(5));
463 else
464 IWL_WARN(priv, "uCode did not respond OK.\n");
465}
466
467/**
468 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
469 *
470 * This callback is provided in order to send a statistics request.
471 *
472 * This timer function is continually reset to execute within
473 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
474 * was received. We need to ensure we receive the statistics in order
475 * to update the temperature used for calibrating the TXPOWER.
476 */
477static void iwl4965_bg_statistics_periodic(unsigned long data)
478{
479 struct iwl_priv *priv = (struct iwl_priv *)data;
480
481 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
482 return;
483
484 /* dont send host command if rf-kill is on */
485 if (!iwl_legacy_is_ready_rf(priv))
486 return;
487
488 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
489}
490
491
492static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
493 u32 start_idx, u32 num_events,
494 u32 mode)
495{
496 u32 i;
497 u32 ptr; /* SRAM byte address of log data */
498 u32 ev, time, data; /* event log data */
499 unsigned long reg_flags;
500
501 if (mode == 0)
502 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
503 else
504 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
505
506 /* Make sure device is powered up for SRAM reads */
507 spin_lock_irqsave(&priv->reg_lock, reg_flags);
508 if (iwl_grab_nic_access(priv)) {
509 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
510 return;
511 }
512
513 /* Set starting address; reads will auto-increment */
514 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
515 rmb();
516
517 /*
518 * "time" is actually "data" for mode 0 (no timestamp).
519 * place event id # at far right for easier visual parsing.
520 */
521 for (i = 0; i < num_events; i++) {
522 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
523 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
524 if (mode == 0) {
525 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
526 0, time, ev);
527 } else {
528 data = _iwl_legacy_read_direct32(priv,
529 HBUS_TARG_MEM_RDAT);
530 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
531 time, data, ev);
532 }
533 }
534 /* Allow device to power down */
535 iwl_release_nic_access(priv);
536 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
537}
538
539static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
540{
541 u32 capacity; /* event log capacity in # entries */
542 u32 base; /* SRAM byte address of event log header */
543 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
544 u32 num_wraps; /* # times uCode wrapped to top of log */
545 u32 next_entry; /* index of next entry to be written by uCode */
546
547 if (priv->ucode_type == UCODE_INIT)
548 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
549 else
550 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
551 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
552 capacity = iwl_legacy_read_targ_mem(priv, base);
553 num_wraps = iwl_legacy_read_targ_mem(priv,
554 base + (2 * sizeof(u32)));
555 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
556 next_entry = iwl_legacy_read_targ_mem(priv,
557 base + (3 * sizeof(u32)));
558 } else
559 return;
560
561 if (num_wraps == priv->event_log.num_wraps) {
562 iwl4965_print_cont_event_trace(priv,
563 base, priv->event_log.next_entry,
564 next_entry - priv->event_log.next_entry,
565 mode);
566 priv->event_log.non_wraps_count++;
567 } else {
568 if ((num_wraps - priv->event_log.num_wraps) > 1)
569 priv->event_log.wraps_more_count++;
570 else
571 priv->event_log.wraps_once_count++;
572 trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
573 num_wraps - priv->event_log.num_wraps,
574 next_entry, priv->event_log.next_entry);
575 if (next_entry < priv->event_log.next_entry) {
576 iwl4965_print_cont_event_trace(priv, base,
577 priv->event_log.next_entry,
578 capacity - priv->event_log.next_entry,
579 mode);
580
581 iwl4965_print_cont_event_trace(priv, base, 0,
582 next_entry, mode);
583 } else {
584 iwl4965_print_cont_event_trace(priv, base,
585 next_entry, capacity - next_entry,
586 mode);
587
588 iwl4965_print_cont_event_trace(priv, base, 0,
589 next_entry, mode);
590 }
591 }
592 priv->event_log.num_wraps = num_wraps;
593 priv->event_log.next_entry = next_entry;
594}
595
596/**
597 * iwl4965_bg_ucode_trace - Timer callback to log ucode event
598 *
599 * The timer is continually set to execute every
600 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
601 * this function is to perform continuous uCode event logging operation
602 * if enabled
603 */
604static void iwl4965_bg_ucode_trace(unsigned long data)
605{
606 struct iwl_priv *priv = (struct iwl_priv *)data;
607
608 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
609 return;
610
611 if (priv->event_log.ucode_trace) {
612 iwl4965_continuous_event_trace(priv);
613 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
614 mod_timer(&priv->ucode_trace,
615 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
616 }
617}
618
619static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
620 struct iwl_rx_mem_buffer *rxb)
621{
622 struct iwl_rx_packet *pkt = rxb_addr(rxb);
623 struct iwl4965_beacon_notif *beacon =
624 (struct iwl4965_beacon_notif *)pkt->u.raw;
625#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
626 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
627
628 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
629 "tsf %d %d rate %d\n",
630 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
631 beacon->beacon_notify_hdr.failure_frame,
632 le32_to_cpu(beacon->ibss_mgr_status),
633 le32_to_cpu(beacon->high_tsf),
634 le32_to_cpu(beacon->low_tsf), rate);
635#endif
636
637 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
638}
639
640static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
641{
642 unsigned long flags;
643
644 IWL_DEBUG_POWER(priv, "Stop all queues\n");
645
646 if (priv->mac80211_registered)
647 ieee80211_stop_queues(priv->hw);
648
649 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
650 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
651 iwl_read32(priv, CSR_UCODE_DRV_GP1);
652
653 spin_lock_irqsave(&priv->reg_lock, flags);
654 if (!iwl_grab_nic_access(priv))
655 iwl_release_nic_access(priv);
656 spin_unlock_irqrestore(&priv->reg_lock, flags);
657}
658
659/* Handle notification from uCode that card's power state is changing
660 * due to software, hardware, or critical temperature RFKILL */
661static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
662 struct iwl_rx_mem_buffer *rxb)
663{
664 struct iwl_rx_packet *pkt = rxb_addr(rxb);
665 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
666 unsigned long status = priv->status;
667
668 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
669 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
670 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
671 (flags & CT_CARD_DISABLED) ?
672 "Reached" : "Not reached");
673
674 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
675 CT_CARD_DISABLED)) {
676
677 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
678 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
679
680 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
681 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
682
683 if (!(flags & RXON_CARD_DISABLED)) {
684 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
685 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
686 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
687 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
688 }
689 }
690
691 if (flags & CT_CARD_DISABLED)
692 iwl4965_perform_ct_kill_task(priv);
693
694 if (flags & HW_CARD_DISABLED)
695 set_bit(STATUS_RF_KILL_HW, &priv->status);
696 else
697 clear_bit(STATUS_RF_KILL_HW, &priv->status);
698
699 if (!(flags & RXON_CARD_DISABLED))
700 iwl_legacy_scan_cancel(priv);
701
702 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
703 test_bit(STATUS_RF_KILL_HW, &priv->status)))
704 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
705 test_bit(STATUS_RF_KILL_HW, &priv->status));
706 else
707 wake_up_interruptible(&priv->wait_command_queue);
708}
709
710/**
711 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
712 *
713 * Setup the RX handlers for each of the reply types sent from the uCode
714 * to the host.
715 *
716 * This function chains into the hardware specific files for them to setup
717 * any hardware specific handlers as well.
718 */
719static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
720{
721 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
722 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
723 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
724 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
725 iwl_legacy_rx_spectrum_measure_notif;
726 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
727 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
728 iwl_legacy_rx_pm_debug_statistics_notif;
729 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
730
731 /*
732 * The same handler is used for both the REPLY to a discrete
733 * statistics request from the host as well as for the periodic
734 * statistics notifications (after received beacons) from the uCode.
735 */
736 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
737 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
738
739 iwl_legacy_setup_rx_scan_handlers(priv);
740
741 /* status change handler */
742 priv->rx_handlers[CARD_STATE_NOTIFICATION] =
743 iwl4965_rx_card_state_notif;
744
745 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
746 iwl4965_rx_missed_beacon_notif;
747 /* Rx handlers */
748 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
749 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
750 /* block ack */
751 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
752 /* Set up hardware specific Rx handlers */
753 priv->cfg->ops->lib->rx_handler_setup(priv);
754}
755
756/**
757 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
758 *
759 * Uses the priv->rx_handlers callback function array to invoke
760 * the appropriate handlers, including command responses,
761 * frame-received notifications, and other notifications.
762 */
763void iwl4965_rx_handle(struct iwl_priv *priv)
764{
765 struct iwl_rx_mem_buffer *rxb;
766 struct iwl_rx_packet *pkt;
767 struct iwl_rx_queue *rxq = &priv->rxq;
768 u32 r, i;
769 int reclaim;
770 unsigned long flags;
771 u8 fill_rx = 0;
772 u32 count = 8;
773 int total_empty;
774
775 /* uCode's read index (stored in shared DRAM) indicates the last Rx
776 * buffer that the driver may process (last buffer filled by ucode). */
777 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
778 i = rxq->read;
779
780 /* Rx interrupt, but nothing sent from uCode */
781 if (i == r)
782 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
783
784 /* calculate total frames need to be restock after handling RX */
785 total_empty = r - rxq->write_actual;
786 if (total_empty < 0)
787 total_empty += RX_QUEUE_SIZE;
788
789 if (total_empty > (RX_QUEUE_SIZE / 2))
790 fill_rx = 1;
791
792 while (i != r) {
793 int len;
794
795 rxb = rxq->queue[i];
796
797 /* If an RXB doesn't have a Rx queue slot associated with it,
798 * then a bug has been introduced in the queue refilling
799 * routines -- catch it here */
800 BUG_ON(rxb == NULL);
801
802 rxq->queue[i] = NULL;
803
804 pci_unmap_page(priv->pci_dev, rxb->page_dma,
805 PAGE_SIZE << priv->hw_params.rx_page_order,
806 PCI_DMA_FROMDEVICE);
807 pkt = rxb_addr(rxb);
808
809 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
810 len += sizeof(u32); /* account for status word */
811 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
812
813 /* Reclaim a command buffer only if this packet is a response
814 * to a (driver-originated) command.
815 * If the packet (e.g. Rx frame) originated from uCode,
816 * there is no command buffer to reclaim.
817 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
818 * but apparently a few don't get set; catch them here. */
819 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
820 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
821 (pkt->hdr.cmd != REPLY_RX) &&
822 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
823 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
824 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
825 (pkt->hdr.cmd != REPLY_TX);
826
827 /* Based on type of command response or notification,
828 * handle those that need handling via function in
829 * rx_handlers table. See iwl4965_setup_rx_handlers() */
830 if (priv->rx_handlers[pkt->hdr.cmd]) {
831 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
832 i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
833 pkt->hdr.cmd);
834 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
835 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
836 } else {
837 /* No handling needed */
838 IWL_DEBUG_RX(priv,
839 "r %d i %d No handler needed for %s, 0x%02x\n",
840 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
841 pkt->hdr.cmd);
842 }
843
844 /*
845 * XXX: After here, we should always check rxb->page
846 * against NULL before touching it or its virtual
847 * memory (pkt). Because some rx_handler might have
848 * already taken or freed the pages.
849 */
850
851 if (reclaim) {
852 /* Invoke any callbacks, transfer the buffer to caller,
853 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
854 * as we reclaim the driver command queue */
855 if (rxb->page)
856 iwl_legacy_tx_cmd_complete(priv, rxb);
857 else
858 IWL_WARN(priv, "Claim null rxb?\n");
859 }
860
861 /* Reuse the page if possible. For notification packets and
862 * SKBs that fail to Rx correctly, add them back into the
863 * rx_free list for reuse later. */
864 spin_lock_irqsave(&rxq->lock, flags);
865 if (rxb->page != NULL) {
866 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
867 0, PAGE_SIZE << priv->hw_params.rx_page_order,
868 PCI_DMA_FROMDEVICE);
869 list_add_tail(&rxb->list, &rxq->rx_free);
870 rxq->free_count++;
871 } else
872 list_add_tail(&rxb->list, &rxq->rx_used);
873
874 spin_unlock_irqrestore(&rxq->lock, flags);
875
876 i = (i + 1) & RX_QUEUE_MASK;
877 /* If there are a lot of unused frames,
878 * restock the Rx queue so ucode wont assert. */
879 if (fill_rx) {
880 count++;
881 if (count >= 8) {
882 rxq->read = i;
883 iwl4965_rx_replenish_now(priv);
884 count = 0;
885 }
886 }
887 }
888
889 /* Backtrack one entry */
890 rxq->read = i;
891 if (fill_rx)
892 iwl4965_rx_replenish_now(priv);
893 else
894 iwl4965_rx_queue_restock(priv);
895}
896
897/* call this function to flush any scheduled tasklet */
898static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
899{
900 /* wait to make sure we flush pending tasklet*/
901 synchronize_irq(priv->pci_dev->irq);
902 tasklet_kill(&priv->irq_tasklet);
903}
904
/*
 * iwl4965_irq_tasklet - bottom half of the interrupt handler
 *
 * Reads and acks all pending interrupt causes (CSR_INT and the
 * flow-handler/DMA causes in CSR_FH_INT_STATUS), then services each
 * discovered cause in turn: HW error, RF-kill toggle, CT-kill, uCode SW
 * error, wakeup, Rx, and uCode-load completion.  Interrupts are
 * re-enabled at the end unless they were deliberately left disabled (or
 * a HW error path returned early).
 */
static void iwl4965_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Early return: interrupts stay disabled; the error
		 * handler is responsible for any restart/recovery. */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl4965_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up_interruptible(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv,
			"End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
1087
1088/*****************************************************************************
1089 *
1090 * sysfs attributes
1091 *
1092 *****************************************************************************/
1093
1094#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1095
1096/*
1097 * The following adds a new attribute to the sysfs representation
1098 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
1099 * used for controlling the debug level.
1100 *
1101 * See the level definitions in iwl for details.
1102 *
1103 * The debug_level being managed using sysfs below is a per device debug
1104 * level that is used instead of the global debug level if it (the per
1105 * device debug level) is set.
1106 */
1107static ssize_t iwl4965_show_debug_level(struct device *d,
1108 struct device_attribute *attr, char *buf)
1109{
1110 struct iwl_priv *priv = dev_get_drvdata(d);
1111 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
1112}
1113static ssize_t iwl4965_store_debug_level(struct device *d,
1114 struct device_attribute *attr,
1115 const char *buf, size_t count)
1116{
1117 struct iwl_priv *priv = dev_get_drvdata(d);
1118 unsigned long val;
1119 int ret;
1120
1121 ret = strict_strtoul(buf, 0, &val);
1122 if (ret)
1123 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
1124 else {
1125 priv->debug_level = val;
1126 if (iwl_legacy_alloc_traffic_mem(priv))
1127 IWL_ERR(priv,
1128 "Not enough memory to generate traffic log\n");
1129 }
1130 return strnlen(buf, count);
1131}
1132
1133static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
1134 iwl4965_show_debug_level, iwl4965_store_debug_level);
1135
1136
1137#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1138
1139
1140static ssize_t iwl4965_show_temperature(struct device *d,
1141 struct device_attribute *attr, char *buf)
1142{
1143 struct iwl_priv *priv = dev_get_drvdata(d);
1144
1145 if (!iwl_legacy_is_alive(priv))
1146 return -EAGAIN;
1147
1148 return sprintf(buf, "%d\n", priv->temperature);
1149}
1150
1151static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1152
1153static ssize_t iwl4965_show_tx_power(struct device *d,
1154 struct device_attribute *attr, char *buf)
1155{
1156 struct iwl_priv *priv = dev_get_drvdata(d);
1157
1158 if (!iwl_legacy_is_ready_rf(priv))
1159 return sprintf(buf, "off\n");
1160 else
1161 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1162}
1163
1164static ssize_t iwl4965_store_tx_power(struct device *d,
1165 struct device_attribute *attr,
1166 const char *buf, size_t count)
1167{
1168 struct iwl_priv *priv = dev_get_drvdata(d);
1169 unsigned long val;
1170 int ret;
1171
1172 ret = strict_strtoul(buf, 10, &val);
1173 if (ret)
1174 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1175 else {
1176 ret = iwl_legacy_set_tx_power(priv, val, false);
1177 if (ret)
1178 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
1179 ret);
1180 else
1181 ret = count;
1182 }
1183 return ret;
1184}
1185
1186static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
1187 iwl4965_show_tx_power, iwl4965_store_tx_power);
1188
/* Attributes exported under the PCI device's sysfs directory. */
static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL	/* sysfs requires a NULL-terminated attribute list */
};

static struct attribute_group iwl_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwl_sysfs_entries,
};
1202
1203/******************************************************************************
1204 *
1205 * uCode download functions
1206 *
1207 ******************************************************************************/
1208
/*
 * iwl4965_dealloc_ucode_pci - free all DMA firmware image buffers
 *
 * Frees every uCode descriptor allocated during firmware load.  Each
 * descriptor is freed unconditionally; presumably
 * iwl_legacy_free_fw_desc() tolerates descriptors that were never
 * allocated (e.g. optional init/boot images) — confirm in its definition.
 */
static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
1218
/* iwl4965_nic_start - release the NIC from reset so it can operate */
static void iwl4965_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
1224
1225static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
1226 void *context);
1227static int iwl4965_mac_setup_register(struct iwl_priv *priv,
1228 u32 max_probe_length);
1229
1230static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1231{
1232 const char *name_pre = priv->cfg->fw_name_pre;
1233 char tag[8];
1234
1235 if (first) {
1236 priv->fw_index = priv->cfg->ucode_api_max;
1237 sprintf(tag, "%d", priv->fw_index);
1238 } else {
1239 priv->fw_index--;
1240 sprintf(tag, "%d", priv->fw_index);
1241 }
1242
1243 if (priv->fw_index < priv->cfg->ucode_api_min) {
1244 IWL_ERR(priv, "no suitable firmware found!\n");
1245 return -ENOENT;
1246 }
1247
1248 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1249
1250 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1251 priv->firmware_name);
1252
1253 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1254 &priv->pci_dev->dev, GFP_KERNEL, priv,
1255 iwl4965_ucode_callback);
1256}
1257
/*
 * Pointers into, and sizes of, the individual images contained in one
 * raw firmware file: runtime instructions/data, init instructions/data,
 * and the bootstrap image.  The pointers reference ucode_raw->data and
 * are valid only while the firmware blob is held.
 */
struct iwl4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
1262
/*
 * iwl4965_load_firmware - parse a raw uCode file into its image pieces
 *
 * Records the ucode version in priv->ucode_ver, reads the 24-byte v0-v2
 * style header, validates the total file size against the sizes the
 * header announces, and fills @pieces with pointers into
 * @ucode_raw->data (images are laid out back-to-back after the header).
 *
 * Returns 0 on success or -EINVAL for a malformed file.
 */
static int iwl4965_load_firmware(struct iwl_priv *priv,
				const struct firmware *ucode_raw,
				struct iwl4965_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(priv->ucode_ver);

	/* all API versions this driver knows share the v1 header layout */
	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size =
			le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	/* NOTE(review): the sum of header-supplied sizes could wrap for a
	 * corrupt file; presumably the blob from /lib/firmware is trusted
	 * here — confirm if that assumption ever changes. */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
				pieces->data_size + pieces->init_size +
				pieces->init_data_size + pieces->boot_size) {

		IWL_ERR(priv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	/* Carve the concatenated images out of the file body */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
1318
/**
 * iwl4965_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA), then finishes device probe:
 * mac80211 registration, debugfs, and sysfs attributes.
 *
 * On a recoverable failure it requests the next-lower firmware API
 * version (try_again); on a fatal one it unbinds the driver from the
 * device (out_unbind).  Always releases @ucode_raw before returning.
 */
static void
iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct iwl_priv *priv = context;
	struct iwl_ucode_header *ucode;
	int err;
	struct iwl4965_firmware_pieces pieces;
	const unsigned int api_max = priv->cfg->ucode_api_max;
	const unsigned int api_min = priv->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	/* NULL ucode_raw means the firmware request itself failed */
	if (!ucode_raw) {
		if (priv->fw_index <= priv->cfg->ucode_api_max)
			IWL_ERR(priv,
				"request for firmware file '%s' failed.\n",
				priv->firmware_name);
		goto try_again;
	}

	IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
		       priv->firmware_name, ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IWL_ERR(priv, "File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file: header followed by uCode images */
	ucode = (struct iwl_ucode_header *)ucode_raw->data;

	err = iwl4965_load_firmware(priv, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IWL_UCODE_API(priv->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	if (api_ver < api_min || api_ver > api_max) {
		IWL_ERR(priv,
			"Driver unable to support your firmware API. "
			"Driver supports v%u, firmware is v%u.\n",
			api_max, api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IWL_ERR(priv,
			"Firmware has old API version. Expected v%u, "
			"got v%u. New firmware can be obtained "
			"from http://www.intellinuxwireless.org.\n",
			api_max, api_ver);

	IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
		 IWL_UCODE_MAJOR(priv->ucode_ver),
		 IWL_UCODE_MINOR(priv->ucode_ver),
		 IWL_UCODE_API(priv->ucode_ver),
		 IWL_UCODE_SERIAL(priv->ucode_ver));

	snprintf(priv->hw->wiphy->fw_version,
		 sizeof(priv->hw->wiphy->fw_version),
		 "%u.%u.%u.%u",
		 IWL_UCODE_MAJOR(priv->ucode_ver),
		 IWL_UCODE_MINOR(priv->ucode_ver),
		 IWL_UCODE_API(priv->ucode_ver),
		 IWL_UCODE_SERIAL(priv->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
		       priv->ucode_ver);
	IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
		       pieces.inst_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
		       pieces.data_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
		       pieces.init_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
		       pieces.init_data_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
		       pieces.boot_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > priv->hw_params.max_inst_size) {
		IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
			pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > priv->hw_params.max_data_size) {
		IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
			pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > priv->hw_params.max_inst_size) {
		IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
			pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > priv->hw_params.max_data_size) {
		IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
			pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > priv->hw_params.max_bsm_size) {
		IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
			pieces.boot_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	priv->ucode_code.len = pieces.inst_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);

	priv->ucode_data.len = pieces.data_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);

	priv->ucode_data_backup.len = pieces.data_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);

	if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
	    !priv->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		priv->ucode_init.len = pieces.init_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);

		priv->ucode_init_data.len = pieces.init_data_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);

		if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		priv->ucode_boot.len = pieces.boot_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);

		if (!priv->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that we can no longer fail, copy information */

	priv->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
			pieces.inst_size);
	memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
		priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);

	/*
	 * Runtime data
	 * NOTE: Copy into backup buffer will be done in iwl_up()
	 */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
			pieces.data_size);
	memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		IWL_DEBUG_INFO(priv,
				"Copying (but not loading) init instr len %Zd\n",
				pieces.init_size);
		memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data */
	if (pieces.init_data_size) {
		IWL_DEBUG_INFO(priv,
				"Copying (but not loading) init data len %Zd\n",
			       pieces.init_data_size);
		memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
			pieces.boot_size);
	memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	priv->_4965.phy_calib_chain_noise_reset_cmd =
		standard_phy_calibration_size;
	priv->_4965.phy_calib_chain_noise_gain_cmd =
		standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = iwl4965_mac_setup_register(priv, max_probe_length);
	if (err)
		goto out_unbind;

	/* debugfs failure is not fatal; sysfs failure is */
	err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
	if (err)
		IWL_ERR(priv,
		"failed to create debugfs files. Ignoring error: %d\n", err);

	err = sysfs_create_group(&priv->pci_dev->dev.kobj,
					&iwl_attribute_group);
	if (err) {
		IWL_ERR(priv, "failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&priv->_4965.firmware_loading_complete);
	return;

 try_again:
	/* try next, if any */
	if (iwl4965_request_firmware(priv, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

 err_pci_alloc:
	IWL_ERR(priv, "failed to allocate pci memory\n");
	iwl4965_dealloc_ucode_pci(priv);
 out_unbind:
	complete(&priv->_4965.firmware_loading_complete);
	device_release_driver(&priv->pci_dev->dev);
	release_firmware(ucode_raw);
}
1587
/*
 * uCode error descriptions, indexed directly by the error code.
 *
 * Fix: a missing comma after "NMI_INTERRUPT_BREAK_POINT" made the
 * compiler concatenate it with "DEBUG_0" into a single entry, shifting
 * every later entry and shortening the table by one.
 */
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
1618
/*
 * Error descriptions for newer "advanced" uCode error codes, matched by
 * numeric value rather than table index.  The final ADVANCED_SYSASSERT
 * entry acts as the catch-all returned by iwl4965_desc_lookup() when no
 * number matches.
 */
static struct { char *name; u8 num; } advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
1637
1638static const char *iwl4965_desc_lookup(u32 num)
1639{
1640 int i;
1641 int max = ARRAY_SIZE(desc_lookup_text);
1642
1643 if (num < max)
1644 return desc_lookup_text[num];
1645
1646 max = ARRAY_SIZE(advanced_lookup) - 1;
1647 for (i = 0; i < max; i++) {
1648 if (advanced_lookup[i].num == num)
1649 break;
1650 }
1651 return advanced_lookup[i].name;
1652}
1653
1654#define ERROR_START_OFFSET (1 * sizeof(u32))
1655#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1656
/*
 * iwl4965_dump_nic_error_log - dump the uCode error-event table to syslog
 *
 * Locates the error table in device SRAM (init or runtime image,
 * depending on which uCode is loaded), validates the pointer, then reads
 * the fixed-offset fields of the first error record and prints them.
 */
void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* first word of the table is the number of recorded errors */
	count = iwl_legacy_read_targ_mem(priv, base);

	/* NOTE(review): this condition is true whenever count >= 1 (and for
	 * count == 0 too, since ERROR_START_OFFSET <= 0 is false only if the
	 * product is 0) — presumably meant as a "log not empty" check;
	 * confirm against the table layout. */
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	/* fields of the first error record, at fixed word offsets */
	desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
	priv->isr_stats.err_code = desc;
	pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
	blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
	blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
	ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
	ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
	data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
	data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
	line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
	time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
	hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));

	trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
					time, data1, data2, line,
					blink1, blink2, ilink1, ilink2);

	IWL_ERR(priv, "Desc Time "
		"data1 data2 line\n");
	IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
		iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
	IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
	IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
		pc, blink1, blink2, ilink1, ilink2, hcmd);
}
1710
1711#define EVENT_START_OFFSET (4 * sizeof(u32))
1712
/**
 * iwl4965_print_event_log - Dump error event log to syslog
 *
 * Reads @num_events entries starting at @start_idx from the uCode event
 * log in SRAM and either formats them into *@buf (when @bufsz is
 * non-zero) or prints them via IWL_ERR.  @mode selects 2-word entries
 * (no timestamp) vs 3-word entries.  Returns the updated write position
 * @pos within *@buf.
 */
static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
			       u32 num_events, u32 mode,
			       int pos, char **buf, size_t bufsz)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	if (num_events == 0)
		return pos;

	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	}

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* Make sure device is powered up for SRAM reads */
	/* NOTE(review): iwl_grab_nic_access() return value is ignored here
	 * — presumably acceptable during error dump; confirm. */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);

	/* Set starting address; reads will auto-increment */
	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/* "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			/* data, ev */
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOG:0x%08x:%04u\n",
						time, ev);
			} else {
				trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
					time, ev);
				IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
					time, ev);
			}
		} else {
			data = _iwl_legacy_read_direct32(priv,
						HBUS_TARG_MEM_RDAT);
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOGT:%010u:0x%08x:%04u\n",
						 time, data, ev);
			} else {
				IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
					time, data, ev);
				trace_iwlwifi_legacy_dev_ucode_event(priv, time,
					data, ev);
			}
		}
	}

	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return pos;
}
1790
1791/**
1792 * iwl4965_print_last_event_logs - Dump the newest # of event log to syslog
1793 */
1794static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1795 u32 num_wraps, u32 next_entry,
1796 u32 size, u32 mode,
1797 int pos, char **buf, size_t bufsz)
1798{
1799 /*
1800 * display the newest DEFAULT_LOG_ENTRIES entries
1801 * i.e the entries just before the next ont that uCode would fill.
1802 */
1803 if (num_wraps) {
1804 if (next_entry < size) {
1805 pos = iwl4965_print_event_log(priv,
1806 capacity - (size - next_entry),
1807 size - next_entry, mode,
1808 pos, buf, bufsz);
1809 pos = iwl4965_print_event_log(priv, 0,
1810 next_entry, mode,
1811 pos, buf, bufsz);
1812 } else
1813 pos = iwl4965_print_event_log(priv, next_entry - size,
1814 size, mode, pos, buf, bufsz);
1815 } else {
1816 if (next_entry < size) {
1817 pos = iwl4965_print_event_log(priv, 0, next_entry,
1818 mode, pos, buf, bufsz);
1819 } else {
1820 pos = iwl4965_print_event_log(priv, next_entry - size,
1821 size, mode, pos, buf, bufsz);
1822 }
1823 }
1824 return pos;
1825}
1826
1827#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1828
/*
 * iwl4965_dump_nic_event_log - dump the uCode event log
 *
 * @full_log: dump the whole log instead of only the newest entries
 * @buf:      (debug builds, with @display set) receives a kmalloc'd text
 *            dump which the caller must kfree
 * @display:  format into *@buf rather than to syslog
 *
 * Returns the number of characters written into *@buf (0 when logging to
 * syslog only) or a negative errno.
 */
int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
				char **buf, bool display)
{
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */
	int pos = 0;
	size_t bufsz = 0;

	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_legacy_read_targ_mem(priv, base);
	mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
	num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
	next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));

	/* once wrapped, the whole buffer holds valid entries */
	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#endif
	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		/* ~48 chars per formatted entry (see print routines) */
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl4965_print_event_log(priv, next_entry,
						capacity - next_entry, mode,
						pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl4965_print_event_log(priv, 0,
					      next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
						    next_entry, size, mode,
						    pos, buf, bufsz);
#else
	pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
					    next_entry, size, mode,
					    pos, buf, bufsz);
#endif
	return pos;
}
1913
1914static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1915{
1916 struct iwl_ct_kill_config cmd;
1917 unsigned long flags;
1918 int ret = 0;
1919
1920 spin_lock_irqsave(&priv->lock, flags);
1921 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1922 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1923 spin_unlock_irqrestore(&priv->lock, flags);
1924
1925 cmd.critical_temperature_R =
1926 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1927
1928 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1929 sizeof(cmd), &cmd);
1930 if (ret)
1931 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1932 else
1933 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1934 "succeeded, "
1935 "critical temperature is %d\n",
1936 priv->hw_params.ct_kill_threshold);
1937}
1938
/*
 * Default Tx-queue -> Tx-FIFO mapping, indexed by queue number and applied
 * in iwl4965_alive_notify(): queues 0-3 carry the four EDCA access
 * categories (voice, video, best-effort, background), queue 4 is the host
 * command queue, and queues 5-6 are unused.  Size is asserted to be 7 by a
 * BUILD_BUG_ON in iwl4965_alive_notify().
 */
static const s8 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,		/* queue 0: voice */
	IWL_TX_FIFO_VI,		/* queue 1: video */
	IWL_TX_FIFO_BE,		/* queue 2: best effort */
	IWL_TX_FIFO_BK,		/* queue 3: background */
	IWL49_CMD_FIFO_NUM,	/* queue 4: host commands */
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};
1948
/*
 * iwl4965_alive_notify - program the Tx scheduler after uCode reports alive
 *
 * Runs entirely under priv->lock: wipes the scheduler's context area in
 * device SRAM, points the scheduler at the DMA'd Tx byte-count tables,
 * enables all Tx DMA channels, initializes every Tx queue (read/write
 * pointers, window size, frame limit), and finally activates each queue and
 * maps it to its FIFO per default_queue_to_tx_fifo.
 *
 * Always returns 0.
 */
static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_legacy_read_prph(priv,
					IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
			priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits (read-modify-write, keep other bits) */
	reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
		reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* enable scheduler interrupts for every configured queue */
	iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
				 (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
2045
/**
 * iwl4965_alive_start - called after REPLY_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by iwl_init_alive_start()).
 *
 * Validates the alive response and the loaded instruction image, finishes
 * the scheduler/queue setup, then brings the stack up: sets STATUS_ALIVE,
 * starts the Tx watchdog, (re)applies RXON configuration, configures BT
 * coex and CT-kill, and finally wakes anyone blocked on the command queue.
 * Any failure along the way queues priv->restart to retry from scratch.
 */
static void iwl4965_alive_start(struct iwl_priv *priv)
{
	int ret = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");

	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded. */
	if (iwl4965_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
		goto restart;
	}

	/* program the Tx scheduler and queues */
	ret = iwl4965_alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}


	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	/* Enable watchdog to monitor the driver tx queues */
	iwl_legacy_setup_watchdog(priv);

	/* with rfkill asserted there is nothing more to bring up here;
	 * note STATUS_READY is deliberately not set in this case */
	if (iwl_legacy_is_rfkill(priv))
		return;

	ieee80211_wake_queues(priv->hw);

	priv->active_rate = IWL_RATES_MASK;

	if (iwl_legacy_is_associated_ctx(ctx)) {
		struct iwl_legacy_rxon_cmd *active_rxon =
				(struct iwl_legacy_rxon_cmd *)&ctx->active;
		/* apply any changes in staging */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		struct iwl_rxon_context *tmp;
		/* Initialize our rx_config data */
		for_each_context(priv, tmp)
			iwl_legacy_connection_init_rx_config(priv, tmp);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	/* Configure bluetooth coexistence if enabled */
	iwl_legacy_send_bt_config(priv);

	iwl4965_reset_run_time_calib(priv);

	set_bit(STATUS_READY, &priv->status);

	/* Configure the adapter for unassociated operation */
	iwl_legacy_commit_rxon(priv, ctx);

	/* At this point, the NIC is initialized and operational */
	iwl4965_rf_kill_ct_config(priv);

	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
	/* unblock callers waiting in iwl4965_mac_start() for STATUS_READY */
	wake_up_interruptible(&priv->wait_command_queue);

	iwl_legacy_power_update_mode(priv, true);
	IWL_DEBUG_INFO(priv, "Updated power mode\n");

	return;

 restart:
	queue_work(priv->workqueue, &priv->restart);
}
2136
2137static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
2138
/*
 * __iwl4965_down - take the NIC all the way down
 *
 * Must be called with priv->mutex held (see iwl4965_down()).  Cancels any
 * scan, stops the watchdog, tears down station state, disables interrupts
 * and DMA, and finally puts the device into low power state.  All status
 * bits except RF-kill/geo/exit-pending are cleared so a later __iwl4965_up
 * starts from a known state.  If STATUS_EXIT_PENDING was already set on
 * entry (module unload in progress), it is left set on return.
 */
static void __iwl4965_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* remember whether we were already exiting; setting the bit here
	 * stops new work from being scheduled while we tear down */
	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_interruptible_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl4965_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!iwl_legacy_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl4965_txq_ctx_stop(priv);
	iwl4965_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	/* dev_kfree_skb(NULL) is a no-op, so no need to check first */
	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl4965_clear_free_frames(priv);
}
2224
/*
 * iwl4965_down - mutex-taking wrapper around __iwl4965_down
 *
 * Deferred work is cancelled only after the mutex is dropped, since some of
 * those work items take priv->mutex themselves (cancelling under the lock
 * could deadlock).
 */
static void iwl4965_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl4965_down(priv);
	mutex_unlock(&priv->mutex);

	iwl4965_cancel_deferred_work(priv);
}
2233
2234#define HW_READY_TIMEOUT (50)
2235
2236static int iwl4965_set_hw_ready(struct iwl_priv *priv)
2237{
2238 int ret = 0;
2239
2240 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2241 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2242
2243 /* See if we got it */
2244 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2245 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2246 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2247 HW_READY_TIMEOUT);
2248 if (ret != -ETIMEDOUT)
2249 priv->hw_ready = true;
2250 else
2251 priv->hw_ready = false;
2252
2253 IWL_DEBUG_INFO(priv, "hardware %s\n",
2254 (priv->hw_ready == 1) ? "ready" : "not ready");
2255 return ret;
2256}
2257
/*
 * iwl4965_prepare_card_hw - bring the NIC to the "ready" handshake state
 *
 * First tries the plain ready handshake; if that fails, asserts the PREPARE
 * bit, waits (up to 150 ms) for PREPARE_DONE, and retries the handshake.
 * The result lands in priv->hw_ready; the returned value is the last poll
 * status and is advisory (callers check priv->hw_ready).
 */
static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
{
	int ret = 0;

	IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");

	ret = iwl4965_set_hw_ready(priv);
	if (priv->hw_ready)
		return ret;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		iwl4965_set_hw_ready(priv);

	return ret;
}
2282
#define MAX_HW_RESTARTS 5

/*
 * __iwl4965_up - bring the NIC up and start loading uCode
 *
 * Must be called with priv->mutex held.  Checks preconditions (not exiting,
 * ucode images present), allocates broadcast stations, performs the
 * hardware-ready handshake, samples the hardware RF-kill switch, initializes
 * the NIC and interrupts, then tries up to MAX_HW_RESTARTS times to load
 * the bootstrap uCode and start the card.  Returns 0 on success (including
 * the rfkill-asserted case, where bring-up stops early), negative errno on
 * failure.
 */
static int __iwl4965_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int i;
	int ret;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bringup\n");
		return -EIO;
	}

	/* one broadcast station per RXON context; all-or-nothing */
	for_each_context(priv, ctx) {
		ret = iwl4965_alloc_bcast_station(priv, ctx);
		if (ret) {
			iwl_legacy_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	iwl4965_prepare_card_hw(priv);

	if (!priv->hw_ready) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv,
		CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	if (iwl_legacy_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);

		/* keep interrupts on so we notice when rfkill is released */
		iwl_legacy_enable_interrupts(priv);
		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* ack/clear all pending interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before iwl_hw_nic_init */
	priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

	ret = iwl4965_hw_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
	       priv->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = priv->cfg->ops->lib->load_ucode(priv);

		if (ret) {
			IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
				ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl4965_nic_start(priv);

		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");

		return 0;
	}

	/* all attempts failed: tear everything down again (EXIT_PENDING is
	 * set only for the duration of __iwl4965_down) */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl4965_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2391
2392
2393/*****************************************************************************
2394 *
2395 * Workqueue callbacks
2396 *
2397 *****************************************************************************/
2398
2399static void iwl4965_bg_init_alive_start(struct work_struct *data)
2400{
2401 struct iwl_priv *priv =
2402 container_of(data, struct iwl_priv, init_alive_start.work);
2403
2404 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2405 return;
2406
2407 mutex_lock(&priv->mutex);
2408 priv->cfg->ops->lib->init_alive_start(priv);
2409 mutex_unlock(&priv->mutex);
2410}
2411
2412static void iwl4965_bg_alive_start(struct work_struct *data)
2413{
2414 struct iwl_priv *priv =
2415 container_of(data, struct iwl_priv, alive_start.work);
2416
2417 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2418 return;
2419
2420 mutex_lock(&priv->mutex);
2421 iwl4965_alive_start(priv);
2422 mutex_unlock(&priv->mutex);
2423}
2424
2425static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2426{
2427 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2428 run_time_calib_work);
2429
2430 mutex_lock(&priv->mutex);
2431
2432 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2433 test_bit(STATUS_SCANNING, &priv->status)) {
2434 mutex_unlock(&priv->mutex);
2435 return;
2436 }
2437
2438 if (priv->start_calib) {
2439 iwl4965_chain_noise_calibration(priv,
2440 (void *)&priv->_4965.statistics);
2441 iwl4965_sensitivity_calibration(priv,
2442 (void *)&priv->_4965.statistics);
2443 }
2444
2445 mutex_unlock(&priv->mutex);
2446}
2447
/*
 * iwl4965_bg_restart - workqueue handler for priv->restart
 *
 * Two recovery paths: after a firmware error (STATUS_FW_ERROR set) the
 * device is downed and mac80211 is asked to restart the hardware, which
 * re-runs the normal start path; otherwise this is a plain down/up cycle
 * driven entirely by the driver.
 */
static void iwl4965_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;

		mutex_lock(&priv->mutex);
		/* interfaces will be re-attached by mac80211's restart */
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;

		__iwl4965_down(priv);

		mutex_unlock(&priv->mutex);
		/* cancel work outside the mutex (work items may take it) */
		iwl4965_cancel_deferred_work(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl4965_down(priv);

		/* __iwl4965_down may have raced with module unload */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			return;

		mutex_lock(&priv->mutex);
		__iwl4965_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
2479
2480static void iwl4965_bg_rx_replenish(struct work_struct *data)
2481{
2482 struct iwl_priv *priv =
2483 container_of(data, struct iwl_priv, rx_replenish);
2484
2485 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2486 return;
2487
2488 mutex_lock(&priv->mutex);
2489 iwl4965_rx_replenish(priv);
2490 mutex_unlock(&priv->mutex);
2491}
2492
2493/*****************************************************************************
2494 *
2495 * mac80211 entry point functions
2496 *
2497 *****************************************************************************/
2498
2499#define UCODE_READY_TIMEOUT (4 * HZ)
2500
2501/*
2502 * Not a mac80211 entry point function, but it fits in with all the
2503 * other mac80211 functions grouped here.
2504 */
/*
 * iwl4965_mac_setup_register - describe our capabilities to mac80211 and
 * register the hardware
 *
 * @max_probe_length: maximum probe-request frame size the uCode supports;
 *	used to size wiphy->max_scan_ie_len.
 *
 * Returns 0 on success or the error from ieee80211_register_hw().  On
 * success priv->mac80211_registered is set so teardown knows to unregister.
 */
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
				  u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_NEED_DTIM_PERIOD |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS only makes sense on 11n-capable SKUs */
	if (priv->cfg->sku & IWL_SKU_N)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* advertise the union of interface modes over all RXON contexts */
	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* publish only the bands that actually have channels */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
2569
2570
/*
 * iwl4965_mac_start - mac80211 start callback
 *
 * Brings the NIC up and then blocks (up to UCODE_READY_TIMEOUT) until the
 * runtime uCode sets STATUS_READY, since mac80211 callbacks cannot be
 * serviced before then.  With rfkill asserted, __iwl4965_up returns 0
 * without loading uCode, so the wait is skipped and the interface is simply
 * marked open.  Returns 0 on success, negative errno on bring-up failure or
 * alive timeout.
 */
int iwl4965_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);
	ret = __iwl4965_up(priv);
	mutex_unlock(&priv->mutex);

	if (ret)
		return ret;

	if (iwl_legacy_is_rfkill(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		/* ret == 0 means the timeout elapsed; re-check READY in case
		 * it was set just as the wait expired */
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	iwl4965_led_enable(priv);

out:
	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
2611
/*
 * iwl4965_mac_stop - mac80211 stop callback
 *
 * Takes the device down and flushes all pending driver work, then re-enables
 * interrupts so hardware rfkill state changes are still delivered while the
 * interface is closed.
 */
void iwl4965_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	iwl4965_down(priv);

	flush_workqueue(priv->workqueue);

	/* enable interrupts again in order to receive rfkill changes */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2633
/*
 * iwl4965_mac_tx - mac80211 transmit callback
 *
 * Hands the frame to the driver Tx path; if iwl4965_tx_skb() reports
 * failure the skb was not consumed, so it is freed here (mac80211 transfers
 * ownership of the skb to us unconditionally).
 */
void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MACDUMP(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwl4965_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MACDUMP(priv, "leave\n");
}
2648
2649void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
2650 struct ieee80211_vif *vif,
2651 struct ieee80211_key_conf *keyconf,
2652 struct ieee80211_sta *sta,
2653 u32 iv32, u16 *phase1key)
2654{
2655 struct iwl_priv *priv = hw->priv;
2656 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2657
2658 IWL_DEBUG_MAC80211(priv, "enter\n");
2659
2660 iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
2661 iv32, phase1key);
2662
2663 IWL_DEBUG_MAC80211(priv, "leave\n");
2664}
2665
2666int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2667 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2668 struct ieee80211_key_conf *key)
2669{
2670 struct iwl_priv *priv = hw->priv;
2671 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2672 struct iwl_rxon_context *ctx = vif_priv->ctx;
2673 int ret;
2674 u8 sta_id;
2675 bool is_default_wep_key = false;
2676
2677 IWL_DEBUG_MAC80211(priv, "enter\n");
2678
2679 if (priv->cfg->mod_params->sw_crypto) {
2680 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2681 return -EOPNOTSUPP;
2682 }
2683
2684 sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2685 if (sta_id == IWL_INVALID_STATION)
2686 return -EINVAL;
2687
2688 mutex_lock(&priv->mutex);
2689 iwl_legacy_scan_cancel_timeout(priv, 100);
2690
2691 /*
2692 * If we are getting WEP group key and we didn't receive any key mapping
2693 * so far, we are in legacy wep mode (group key only), otherwise we are
2694 * in 1X mode.
2695 * In legacy wep mode, we use another host command to the uCode.
2696 */
2697 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2698 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2699 !sta) {
2700 if (cmd == SET_KEY)
2701 is_default_wep_key = !ctx->key_mapping_keys;
2702 else
2703 is_default_wep_key =
2704 (key->hw_key_idx == HW_KEY_DEFAULT);
2705 }
2706
2707 switch (cmd) {
2708 case SET_KEY:
2709 if (is_default_wep_key)
2710 ret = iwl4965_set_default_wep_key(priv,
2711 vif_priv->ctx, key);
2712 else
2713 ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
2714 key, sta_id);
2715
2716 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2717 break;
2718 case DISABLE_KEY:
2719 if (is_default_wep_key)
2720 ret = iwl4965_remove_default_wep_key(priv, ctx, key);
2721 else
2722 ret = iwl4965_remove_dynamic_key(priv, ctx,
2723 key, sta_id);
2724
2725 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2726 break;
2727 default:
2728 ret = -EINVAL;
2729 }
2730
2731 mutex_unlock(&priv->mutex);
2732 IWL_DEBUG_MAC80211(priv, "leave\n");
2733
2734 return ret;
2735}
2736
/*
 * iwl4965_mac_ampdu_action - mac80211 A-MPDU (aggregation) callback
 *
 * Starts/stops Rx and Tx aggregation sessions for (sta, tid) and tracks the
 * number of active Tx aggregation TIDs in priv->_4965.agg_tids_count.
 * Returns -EACCES on non-11n SKUs; stop requests report success when the
 * driver is already tearing down (STATUS_EXIT_PENDING) so mac80211 does not
 * stall.  ret stays -EINVAL for any action value not handled below.
 */
int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    enum ieee80211_ampdu_mlme_action action,
			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
			    u8 buf_size)
{
	struct iwl_priv *priv = hw->priv;
	int ret = -EINVAL;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if (!(priv->cfg->sku & IWL_SKU_N))
		return -EACCES;

	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
		if (ret == 0) {
			priv->_4965.agg_tids_count++;
			IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
				     priv->_4965.agg_tids_count);
		}
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
		if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
			priv->_4965.agg_tids_count--;
			IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
				     priv->_4965.agg_tids_count);
		}
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* nothing to program; buf_size is unused on 4965 */
		ret = 0;
		break;
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
2793
/*
 * iwl4965_mac_sta_add - mac80211 sta_add callback
 *
 * Adds the peer to the uCode station table and initializes rate scaling for
 * it.  Returns 0 on success or the error from
 * iwl_legacy_add_station_common().
 */
int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	/* NOTE(review): despite the name, this is true when WE are a managed
	 * (station) interface, i.e. the peer being added is our AP — confirm
	 * against iwl_legacy_add_station_common()'s is_ap semantics */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	/* mark invalid until the add actually succeeds */
	sta_priv->common.sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
				     is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl4965_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}
2834
/*
 * iwl4965_mac_channel_switch - mac80211 channel_switch callback
 *
 * Handles an AP-announced channel switch for the BSS context: validates the
 * target channel, rebuilds the staging RXON (channel, HT40 configuration,
 * band flags) under priv->lock, then asks the device-specific
 * set_channel_switch op to schedule the switch.  If the switch is not left
 * in progress by the time we exit, mac80211 is told the switch aborted via
 * ieee80211_chswitch_done(..., false).
 */
void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
			       struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;
	unsigned long flags = 0;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* channel switching only makes sense on a live, associated BSS */
	if (iwl_legacy_is_rfkill(priv))
		goto out_exit;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		goto out_exit;

	if (!iwl_legacy_is_associated_ctx(ctx))
		goto out_exit;

	/* channel switch in progress */
	if (priv->switch_rxon.switch_in_progress == true)
		goto out_exit;

	mutex_lock(&priv->mutex);
	if (priv->cfg->ops->lib->set_channel_switch) {

		ch = channel->hw_value;
		/* no-op if we are already on the target channel */
		if (le16_to_cpu(ctx->active.channel) != ch) {
			ch_info = iwl_legacy_get_channel_info(priv,
						       channel->band,
						       ch);
			if (!iwl_legacy_is_channel_valid(ch_info)) {
				IWL_DEBUG_MAC80211(priv, "invalid channel\n");
				goto out;
			}
			spin_lock_irqsave(&priv->lock, flags);

			priv->current_ht_config.smps = conf->smps_mode;

			/* Configure HT40 channels */
			ctx->ht.enabled = conf_is_ht(conf);
			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/* flags are stale once the channel changes */
			if ((le16_to_cpu(ctx->staging.channel) != ch))
				ctx->staging.flags = 0;

			iwl_legacy_set_rxon_channel(priv, channel, ctx);
			iwl_legacy_set_rxon_ht(priv, ht_conf);
			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
			spin_unlock_irqrestore(&priv->lock, flags);

			iwl_legacy_set_rate(priv);
			/*
			 * at this point, staging_rxon has the
			 * configuration for channel switch
			 */
			if (priv->cfg->ops->lib->set_channel_switch(priv,
								    ch_switch))
				priv->switch_rxon.switch_in_progress = false;
		}
	}
out:
	mutex_unlock(&priv->mutex);
out_exit:
	if (!priv->switch_rxon.switch_in_progress)
		ieee80211_chswitch_done(ctx->vif, false);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2925
/*
 * iwl4965_configure_filter - mac80211 configure_filter callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter flags and stores
 * them in every context's staging RXON.  The change is not committed to the
 * device here (a scan may be running); it takes effect with the next RXON
 * commit.  On return *total_flags is clamped to the subset of filters this
 * hardware can honor.
 */
void iwl4965_configure_filter(struct ieee80211_hw *hw,
			     unsigned int changed_flags,
			     unsigned int *total_flags,
			     u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

/* for each mac80211 filter bit, accumulate the RXON bits to set (or) or
 * clear (nand) */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
2975
2976/*****************************************************************************
2977 *
2978 * driver setup and teardown
2979 *
2980 *****************************************************************************/
2981
/*
 * Deferred work: push the current TX power configuration to the device.
 *
 * Scheduled (from priv->txpower_work) when a temperature change makes a
 * TX power recalibration necessary; bails out early if the driver is
 * tearing down or a scan is in flight.
 */
static void iwl4965_bg_txpower_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
			txpower_work);

	/* If a scan happened to start before we got here
	 * then just return; the statistics notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		return;

	mutex_lock(&priv->mutex);

	/* Regardless of if we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	priv->cfg->ops->lib->send_tx_power(priv);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	priv->last_temperature = priv->temperature;

	mutex_unlock(&priv->mutex);
}
3008
/*
 * Create the driver workqueue and initialize all deferred-work items,
 * timers and the IRQ tasklet.  Counterpart of
 * iwl4965_cancel_deferred_work(); called once from probe.
 */
static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
{
	/*
	 * NOTE(review): create_singlethread_workqueue() can return NULL on
	 * allocation failure and the result is not checked here — confirm
	 * that probe tolerates a NULL priv->workqueue or add a check.
	 */
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl4965_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
	INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);

	iwl_legacy_setup_scan_deferred_work(priv);

	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);

	/* Periodic timers all take priv as their callback argument. */
	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;

	init_timer(&priv->ucode_trace);
	priv->ucode_trace.data = (unsigned long)priv;
	priv->ucode_trace.function = iwl4965_bg_ucode_trace;

	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_legacy_bg_watchdog;

	/* Interrupt bottom half runs in tasklet context. */
	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl4965_irq_tasklet, (unsigned long)priv);
}
3040
/*
 * Cancel the deferred work and timers set up by
 * iwl4965_setup_deferred_work().
 *
 * Note the asymmetry: init_alive_start is cancelled synchronously while
 * alive_start uses the non-sync cancel — presumably deliberate to avoid a
 * deadlock with work running on priv->workqueue; confirm before changing.
 * The watchdog timer is not deleted here — presumably handled elsewhere
 * in the teardown path.
 */
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->txpower_work);
	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->alive_start);
	cancel_work_sync(&priv->run_time_calib_work);

	iwl_legacy_cancel_scan_deferred_work(priv);

	del_timer_sync(&priv->statistics_periodic);
	del_timer_sync(&priv->ucode_trace);
}
3053
3054static void iwl4965_init_hw_rates(struct iwl_priv *priv,
3055 struct ieee80211_rate *rates)
3056{
3057 int i;
3058
3059 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3060 rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
3061 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3062 rates[i].hw_value_short = i;
3063 rates[i].flags = 0;
3064 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3065 /*
3066 * If CCK != 1M then set short preamble rate flag.
3067 */
3068 rates[i].flags |=
3069 (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3070 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3071 }
3072 }
3073}
/*
 * Point both the driver write pointer (HBUS_TARG_WRPTR) and the uCode
 * scheduler read pointer (SCD_QUEUE_RDPTR) of TX queue @txq_id at @index.
 *
 * Acquire priv->lock before calling this function !
 */
void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	/* low byte: index within the queue; high byte: queue id */
	iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}
3083
/*
 * Program the uCode scheduler status register for one TX queue: bind the
 * queue to a TX FIFO, set its active state (taken from
 * priv->txq_ctx_active_msk) and its scheduler-retry (aggregation) mode.
 */
void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
			 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
			 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
			 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
			 IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
3107
3108
/*
 * One-time software-side driver initialization: locks, mutexes, default
 * configuration values, regulatory channel map and geo (band) structures.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is released again (goto-based unwinding).
 * Counterpart of iwl4965_uninit_drv().
 */
static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);
	mutex_init(&priv->sync_cmd_mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
	priv->_4965.agg_tids_count = 0;

	/* initialize force reset */
	priv->force_reset[IWL_RF_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_RF_RESET;
	priv->force_reset[IWL_FW_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);

	iwl_legacy_init_scan_params(priv);

	/* Set the tx_power_user_lmt to the lowest power level
	 * this value will get overwritten by channel max power avg
	 * from eeprom */
	priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
	priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;

	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwl4965_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
3169
/*
 * Release everything allocated by iwl4965_init_drv() plus the
 * calibration results and the scan command buffer.
 */
static void iwl4965_uninit_drv(struct iwl_priv *priv)
{
	iwl4965_calib_free_results(priv);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
	kfree(priv->scan_cmd);	/* kfree(NULL) is a no-op */
}
3177
/*
 * Read the hardware revision registers (CSR) and the PCI revision ID
 * into priv; used for logging and device identification.
 */
static void iwl4965_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
	pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
}
3185
/*
 * Set generic hardware parameters (RX queue size, RX buffer page order
 * from the amsdu_size_8K module parameter, beacon interval limit, 11n
 * SKU) and then delegate to the device-specific set_hw_params hook.
 *
 * Returns whatever the device-specific hook returns.
 */
static int iwl4965_set_hw_params(struct iwl_priv *priv)
{
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	if (priv->cfg->mod_params->amsdu_size_8K)
		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
	else
		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);

	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;

	/* honour the 11n_disable module parameter */
	if (priv->cfg->mod_params->disable_11n)
		priv->cfg->sku &= ~IWL_SKU_N;

	/* Device-specific setup */
	return priv->cfg->ops->lib->set_hw_params(priv);
}
3203
/* Map mac80211 AC index (VO, VI, BE, BK) to the 4965 TX FIFO used by the
 * BSS context. */
static const u8 iwl4965_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

/* Map mac80211 AC index to the hardware TX queue number for the BSS
 * context (identity mapping). */
static const u8 iwl4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
3214
3215static int
3216iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3217{
3218 int err = 0, i;
3219 struct iwl_priv *priv;
3220 struct ieee80211_hw *hw;
3221 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3222 unsigned long flags;
3223 u16 pci_cmd;
3224
3225 /************************
3226 * 1. Allocating HW data
3227 ************************/
3228
3229 hw = iwl_legacy_alloc_all(cfg);
3230 if (!hw) {
3231 err = -ENOMEM;
3232 goto out;
3233 }
3234 priv = hw->priv;
3235 /* At this point both hw and priv are allocated. */
3236
3237 /*
3238 * The default context is always valid,
3239 * more may be discovered when firmware
3240 * is loaded.
3241 */
3242 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3243
3244 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3245 priv->contexts[i].ctxid = i;
3246
3247 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
3248 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
3249 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3250 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3251 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3252 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3253 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3254 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3255 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
3256 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
3257 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
3258 BIT(NL80211_IFTYPE_ADHOC);
3259 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3260 BIT(NL80211_IFTYPE_STATION);
3261 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
3262 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3263 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3264 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3265
3266 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
3267
3268 SET_IEEE80211_DEV(hw, &pdev->dev);
3269
3270 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3271 priv->cfg = cfg;
3272 priv->pci_dev = pdev;
3273 priv->inta_mask = CSR_INI_SET_MASK;
3274
3275 if (iwl_legacy_alloc_traffic_mem(priv))
3276 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3277
3278 /**************************
3279 * 2. Initializing PCI bus
3280 **************************/
3281 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3282 PCIE_LINK_STATE_CLKPM);
3283
3284 if (pci_enable_device(pdev)) {
3285 err = -ENODEV;
3286 goto out_ieee80211_free_hw;
3287 }
3288
3289 pci_set_master(pdev);
3290
3291 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
3292 if (!err)
3293 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
3294 if (err) {
3295 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3296 if (!err)
3297 err = pci_set_consistent_dma_mask(pdev,
3298 DMA_BIT_MASK(32));
3299 /* both attempts failed: */
3300 if (err) {
3301 IWL_WARN(priv, "No suitable DMA available.\n");
3302 goto out_pci_disable_device;
3303 }
3304 }
3305
3306 err = pci_request_regions(pdev, DRV_NAME);
3307 if (err)
3308 goto out_pci_disable_device;
3309
3310 pci_set_drvdata(pdev, priv);
3311
3312
3313 /***********************
3314 * 3. Read REV register
3315 ***********************/
3316 priv->hw_base = pci_iomap(pdev, 0, 0);
3317 if (!priv->hw_base) {
3318 err = -ENODEV;
3319 goto out_pci_release_regions;
3320 }
3321
3322 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3323 (unsigned long long) pci_resource_len(pdev, 0));
3324 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3325
3326 /* these spin locks will be used in apm_ops.init and EEPROM access
3327 * we should init now
3328 */
3329 spin_lock_init(&priv->reg_lock);
3330 spin_lock_init(&priv->lock);
3331
3332 /*
3333 * stop and reset the on-board processor just in case it is in a
3334 * strange state ... like being left stranded by a primary kernel
3335 * and this is now the kdump kernel trying to start up
3336 */
3337 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3338
3339 iwl4965_hw_detect(priv);
3340 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3341 priv->cfg->name, priv->hw_rev);
3342
3343 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3344 * PCI Tx retries from interfering with C3 CPU state */
3345 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3346
3347 iwl4965_prepare_card_hw(priv);
3348 if (!priv->hw_ready) {
3349 IWL_WARN(priv, "Failed, HW not ready\n");
3350 goto out_iounmap;
3351 }
3352
3353 /*****************
3354 * 4. Read EEPROM
3355 *****************/
3356 /* Read the EEPROM */
3357 err = iwl_legacy_eeprom_init(priv);
3358 if (err) {
3359 IWL_ERR(priv, "Unable to init EEPROM\n");
3360 goto out_iounmap;
3361 }
3362 err = iwl4965_eeprom_check_version(priv);
3363 if (err)
3364 goto out_free_eeprom;
3365
3366 if (err)
3367 goto out_free_eeprom;
3368
3369 /* extract MAC Address */
3370 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3371 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3372 priv->hw->wiphy->addresses = priv->addresses;
3373 priv->hw->wiphy->n_addresses = 1;
3374
3375 /************************
3376 * 5. Setup HW constants
3377 ************************/
3378 if (iwl4965_set_hw_params(priv)) {
3379 IWL_ERR(priv, "failed to set hw parameters\n");
3380 goto out_free_eeprom;
3381 }
3382
3383 /*******************
3384 * 6. Setup priv
3385 *******************/
3386
3387 err = iwl4965_init_drv(priv);
3388 if (err)
3389 goto out_free_eeprom;
3390 /* At this point both hw and priv are initialized. */
3391
3392 /********************
3393 * 7. Setup services
3394 ********************/
3395 spin_lock_irqsave(&priv->lock, flags);
3396 iwl_legacy_disable_interrupts(priv);
3397 spin_unlock_irqrestore(&priv->lock, flags);
3398
3399 pci_enable_msi(priv->pci_dev);
3400
3401 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3402 IRQF_SHARED, DRV_NAME, priv);
3403 if (err) {
3404 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3405 goto out_disable_msi;
3406 }
3407
3408 iwl4965_setup_deferred_work(priv);
3409 iwl4965_setup_rx_handlers(priv);
3410
3411 /*********************************************
3412 * 8. Enable interrupts and read RFKILL state
3413 *********************************************/
3414
3415 /* enable interrupts if needed: hw bug w/a */
3416 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3417 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3418 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3419 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3420 }
3421
3422 iwl_legacy_enable_interrupts(priv);
3423
3424 /* If platform's RF_KILL switch is NOT set to KILL */
3425 if (iwl_read32(priv, CSR_GP_CNTRL) &
3426 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3427 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3428 else
3429 set_bit(STATUS_RF_KILL_HW, &priv->status);
3430
3431 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3432 test_bit(STATUS_RF_KILL_HW, &priv->status));
3433
3434 iwl_legacy_power_initialize(priv);
3435
3436 init_completion(&priv->_4965.firmware_loading_complete);
3437
3438 err = iwl4965_request_firmware(priv, true);
3439 if (err)
3440 goto out_destroy_workqueue;
3441
3442 return 0;
3443
3444 out_destroy_workqueue:
3445 destroy_workqueue(priv->workqueue);
3446 priv->workqueue = NULL;
3447 free_irq(priv->pci_dev->irq, priv);
3448 out_disable_msi:
3449 pci_disable_msi(priv->pci_dev);
3450 iwl4965_uninit_drv(priv);
3451 out_free_eeprom:
3452 iwl_legacy_eeprom_free(priv);
3453 out_iounmap:
3454 pci_iounmap(pdev, priv->hw_base);
3455 out_pci_release_regions:
3456 pci_set_drvdata(pdev, NULL);
3457 pci_release_regions(pdev);
3458 out_pci_disable_device:
3459 pci_disable_device(pdev);
3460 out_ieee80211_free_hw:
3461 iwl_legacy_free_traffic_mem(priv);
3462 ieee80211_free_hw(priv->hw);
3463 out:
3464 return err;
3465}
3466
/*
 * PCI remove: tear the device down in (roughly) the reverse order of
 * probe.  The ordering is load-bearing — see the inline comments about
 * the workqueue and about stopping the device before freeing resources.
 */
static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!priv)
		return;

	/* Firmware may still be loading asynchronously (see probe). */
	wait_for_completion(&priv->_4965.firmware_loading_complete);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_legacy_dbgfs_unregister(priv);
	sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);

	/* ieee80211_unregister_hw call wil cause iwl_mac_stop to
	 * to be called and iwl4965_down since we are removing the device
	 * we need to set STATUS_EXIT_PENDING bit.
	 */
	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		iwl4965_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl4965_down(), but there are paths to
	 * run iwl4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_synchronize_irq(priv);

	iwl4965_dealloc_ucode_pci(priv);

	if (priv->rxq.bd)
		iwl4965_rx_queue_free(priv, &priv->rxq);
	iwl4965_hw_txq_ctx_free(priv);

	iwl_legacy_eeprom_free(priv);


	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	free_irq(priv->pci_dev->irq, priv);
	pci_disable_msi(priv->pci_dev);
	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl4965_uninit_drv(priv);

	/* dev_kfree_skb(NULL) is a no-op */
	dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}
3547
/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 * must be called under priv->lock and mac access
 *
 * Writes @mask directly into the scheduler's TXFACT register.
 */
void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
}
3556
3557/*****************************************************************************
3558 *
3559 * driver and module entry point
3560 *
3561 *****************************************************************************/
3562
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
	/* 4965AGN devices; any subsystem vendor/device */
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
#endif /* CONFIG_IWL4965 */

	{0}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);

/* PCI driver glue: probe/remove above, PM ops shared with iwl3945. */
static struct pci_driver iwl4965_driver = {
	.name = DRV_NAME,
	.id_table = iwl4965_hw_card_ids,
	.probe = iwl4965_pci_probe,
	.remove = __devexit_p(iwl4965_pci_remove),
	.driver.pm = IWL_LEGACY_PM_OPS,
};
3581
3582static int __init iwl4965_init(void)
3583{
3584
3585 int ret;
3586 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3587 pr_info(DRV_COPYRIGHT "\n");
3588
3589 ret = iwl4965_rate_control_register();
3590 if (ret) {
3591 pr_err("Unable to register rate control algorithm: %d\n", ret);
3592 return ret;
3593 }
3594
3595 ret = pci_register_driver(&iwl4965_driver);
3596 if (ret) {
3597 pr_err("Unable to initialize PCI module\n");
3598 goto error_register;
3599 }
3600
3601 return ret;
3602
3603error_register:
3604 iwl4965_rate_control_unregister();
3605 return ret;
3606}
3607
/* Module exit: unwind iwl4965_init() in reverse order. */
static void __exit iwl4965_exit(void)
{
	pci_unregister_driver(&iwl4965_driver);
	iwl4965_rate_control_unregister();
}
3613
module_exit(iwl4965_exit);
module_init(iwl4965_init);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Runtime-writable debug mask shared by the legacy drivers. */
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Module parameters backing iwl4965_mod_params (read-only at runtime). */
module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ed424574160e..17d555f2215a 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,14 +1,52 @@
1config IWLWIFI 1config IWLAGN
2 tristate "Intel Wireless Wifi" 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
3 depends on PCI && MAC80211 3 depends on PCI && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 select NEW_LEDS
6 select LEDS_CLASS
7 select LEDS_TRIGGERS
8 select MAC80211_LEDS
9 ---help---
10 Select to build the driver supporting the:
11
12 Intel Wireless WiFi Link Next-Gen AGN
13
14 This option enables support for use with the following hardware:
15 Intel Wireless WiFi Link 6250AGN Adapter
16 Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
17 Intel WiFi Link 1000BGN
18 Intel Wireless WiFi 5150AGN
19 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
20 Intel 6005 Series Wi-Fi Adapters
21 Intel 6030 Series Wi-Fi Adapters
22 Intel Wireless WiFi Link 6150BGN 2 Adapter
23 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
24 Intel 2000 Series Wi-Fi Adapters
25
26
27 This driver uses the kernel's mac80211 subsystem.
28
29 In order to use this driver, you will need a microcode (uCode)
30 image for it. You can obtain the microcode from:
31
32 <http://intellinuxwireless.org/>.
33
34 The microcode is typically installed in /lib/firmware. You can
35 look in the hotplug script /etc/hotplug/firmware.agent to
36 determine which directory FIRMWARE_DIR is set to when the script
37 runs.
38
39 If you want to compile the driver as a module ( = code which can be
40 inserted in and removed from the running kernel whenever you want),
41 say M here and read <file:Documentation/kbuild/modules.txt>. The
42 module will be called iwlagn.
5 43
6menu "Debugging Options" 44menu "Debugging Options"
7 depends on IWLWIFI 45 depends on IWLAGN
8 46
9config IWLWIFI_DEBUG 47config IWLWIFI_DEBUG
10 bool "Enable full debugging output in iwlagn and iwl3945 drivers" 48 bool "Enable full debugging output in the iwlagn driver"
11 depends on IWLWIFI 49 depends on IWLAGN
12 ---help--- 50 ---help---
13 This option will enable debug tracing output for the iwlwifi drivers 51 This option will enable debug tracing output for the iwlwifi drivers
14 52
@@ -33,7 +71,7 @@ config IWLWIFI_DEBUG
33 71
34config IWLWIFI_DEBUGFS 72config IWLWIFI_DEBUGFS
35 bool "iwlagn debugfs support" 73 bool "iwlagn debugfs support"
36 depends on IWLWIFI && MAC80211_DEBUGFS 74 depends on IWLAGN && MAC80211_DEBUGFS
37 ---help--- 75 ---help---
38 Enable creation of debugfs files for the iwlwifi drivers. This 76 Enable creation of debugfs files for the iwlwifi drivers. This
39 is a low-impact option that allows getting insight into the 77 is a low-impact option that allows getting insight into the
@@ -41,13 +79,13 @@ config IWLWIFI_DEBUGFS
41 79
42config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE 80config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
43 bool "Experimental uCode support" 81 bool "Experimental uCode support"
44 depends on IWLWIFI && IWLWIFI_DEBUG 82 depends on IWLAGN && IWLWIFI_DEBUG
45 ---help--- 83 ---help---
46 Enable use of experimental ucode for testing and debugging. 84 Enable use of experimental ucode for testing and debugging.
47 85
48config IWLWIFI_DEVICE_TRACING 86config IWLWIFI_DEVICE_TRACING
49 bool "iwlwifi device access tracing" 87 bool "iwlwifi device access tracing"
50 depends on IWLWIFI 88 depends on IWLAGN
51 depends on EVENT_TRACING 89 depends on EVENT_TRACING
52 help 90 help
53 Say Y here to trace all commands, including TX frames and IO 91 Say Y here to trace all commands, including TX frames and IO
@@ -64,73 +102,19 @@ config IWLWIFI_DEVICE_TRACING
64 occur. 102 occur.
65endmenu 103endmenu
66 104
67config IWLAGN 105config IWL_P2P
68 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)" 106 bool "iwlwifi experimental P2P support"
69 depends on IWLWIFI
70 ---help---
71 Select to build the driver supporting the:
72
73 Intel Wireless WiFi Link Next-Gen AGN
74
75 This driver uses the kernel's mac80211 subsystem.
76
77 In order to use this driver, you will need a microcode (uCode)
78 image for it. You can obtain the microcode from:
79
80 <http://intellinuxwireless.org/>.
81
82 The microcode is typically installed in /lib/firmware. You can
83 look in the hotplug script /etc/hotplug/firmware.agent to
84 determine which directory FIRMWARE_DIR is set to when the script
85 runs.
86
87 If you want to compile the driver as a module ( = code which can be
88 inserted in and removed from the running kernel whenever you want),
89 say M here and read <file:Documentation/kbuild/modules.txt>. The
90 module will be called iwlagn.
91
92
93config IWL4965
94 bool "Intel Wireless WiFi 4965AGN"
95 depends on IWLAGN
96 ---help---
97 This option enables support for Intel Wireless WiFi Link 4965AGN
98
99config IWL5000
100 bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link"
101 depends on IWLAGN 107 depends on IWLAGN
102 ---help--- 108 help
103 This option enables support for use with the following hardware: 109 This option enables experimental P2P support for some devices
104 Intel Wireless WiFi Link 6250AGN Adapter 110 based on microcode support. Since P2P support is still under
105 Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN) 111 development, this option may even enable it for some devices
106 Intel WiFi Link 1000BGN 112 now that turn out to not support it in the future due to
107 Intel Wireless WiFi 5150AGN 113 microcode restrictions.
108 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
109 Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B)
110 Intel WIreless WiFi Link 6050BGN Gen 2 Adapter
111 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
112
113config IWL3945
114 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
115 depends on IWLWIFI
116 ---help---
117 Select to build the driver supporting the:
118
119 Intel PRO/Wireless 3945ABG/BG Network Connection
120
121 This driver uses the kernel's mac80211 subsystem.
122
123 In order to use this driver, you will need a microcode (uCode)
124 image for it. You can obtain the microcode from:
125 114
126 <http://intellinuxwireless.org/>. 115 To determine if your microcode supports the experimental P2P
116 offered by this option, check if the driver advertises AP
117 support when it is loaded.
127 118
128 The microcode is typically installed in /lib/firmware. You can 119 Say Y only if you want to experiment with P2P.
129 look in the hotplug script /etc/hotplug/firmware.agent to
130 determine which directory FIRMWARE_DIR is set to when the script
131 runs.
132 120
133 If you want to compile the driver as a module ( = code which can be
134 inserted in and removed from the running kernel whenever you want),
135 say M here and read <file:Documentation/kbuild/modules.txt>. The
136 module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 93380f97835f..9d6ee836426c 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,35 +1,23 @@
1obj-$(CONFIG_IWLWIFI) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
6iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
7iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
8iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
9
10# If 3945 is selected only, iwl-legacy.o will be added
11# to iwlcore-m above, but it needs to be built in.
12iwlcore-objs += $(iwlcore-m)
13
14CFLAGS_iwl-devtrace.o := -I$(src)
15
16# AGN 1# AGN
17obj-$(CONFIG_IWLAGN) += iwlagn.o 2obj-$(CONFIG_IWLAGN) += iwlagn.o
18iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o 3iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
19iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o 4iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
20iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o 5iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o
21iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o 6iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
22iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
23 7
24iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 8iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
25iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o 9iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o
26iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 10iwlagn-objs += iwl-scan.o iwl-led.o
27iwlagn-$(CONFIG_IWL5000) += iwl-6000.o 11iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
28iwlagn-$(CONFIG_IWL5000) += iwl-1000.o 12iwlagn-objs += iwl-5000.o
13iwlagn-objs += iwl-6000.o
14iwlagn-objs += iwl-1000.o
15iwlagn-objs += iwl-2000.o
16
17iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
18iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
19iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
29 20
30# 3945 21CFLAGS_iwl-devtrace.o := -I$(src)
31obj-$(CONFIG_IWL3945) += iwl3945.o
32iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
33iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
34 22
35ccflags-y += -D__CHECK_ENDIAN__ 23ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index ba78bc8a259f..27c5007e577c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -49,7 +49,7 @@
49#include "iwl-agn-debugfs.h" 49#include "iwl-agn-debugfs.h"
50 50
51/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
52#define IWL1000_UCODE_API_MAX 3 52#define IWL1000_UCODE_API_MAX 5
53#define IWL100_UCODE_API_MAX 5 53#define IWL100_UCODE_API_MAX 5
54 54
55/* Lowest firmware API version supported */ 55/* Lowest firmware API version supported */
@@ -232,8 +232,6 @@ static struct iwl_lib_ops iwl1000_lib = {
232 .bt_stats_read = iwl_ucode_bt_stats_read, 232 .bt_stats_read = iwl_ucode_bt_stats_read,
233 .reply_tx_error = iwl_reply_tx_error_read, 233 .reply_tx_error = iwl_reply_tx_error_read,
234 }, 234 },
235 .check_plcp_health = iwl_good_plcp_health,
236 .check_ack_health = iwl_good_ack_health,
237 .txfifo_flush = iwlagn_txfifo_flush, 235 .txfifo_flush = iwlagn_txfifo_flush,
238 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 236 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
239 .tt_ops = { 237 .tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
new file mode 100644
index 000000000000..d7b6126408c9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -0,0 +1,560 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-sta.h"
45#include "iwl-agn.h"
46#include "iwl-helpers.h"
47#include "iwl-agn-hw.h"
48#include "iwl-6000-hw.h"
49#include "iwl-agn-led.h"
50#include "iwl-agn-debugfs.h"
51
52/* Highest firmware API version supported */
53#define IWL2030_UCODE_API_MAX 5
54#define IWL2000_UCODE_API_MAX 5
55#define IWL200_UCODE_API_MAX 5
56
57/* Lowest firmware API version supported */
58#define IWL2030_UCODE_API_MIN 5
59#define IWL2000_UCODE_API_MIN 5
60#define IWL200_UCODE_API_MIN 5
61
62#define IWL2030_FW_PRE "iwlwifi-2030-"
63#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
64#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api)
65
66#define IWL2000_FW_PRE "iwlwifi-2000-"
67#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
68#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api)
69
70#define IWL200_FW_PRE "iwlwifi-200-"
71#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode"
72#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api)
73
74static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
75{
76 /* want Celsius */
77 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
78 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
79}
80
81/* NIC configuration for 2000 series */
82static void iwl2000_nic_config(struct iwl_priv *priv)
83{
84 u16 radio_cfg;
85
86 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
87
88 /* write radio config values to register */
89 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
90 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
91 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
92 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
93 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
94
95 /* set CSR_HW_CONFIG_REG for uCode use */
96 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
97 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
98 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
99
100 if (priv->cfg->iq_invert)
101 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
102 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
103
104}
105
106static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
107 .min_nrg_cck = 97,
108 .max_nrg_cck = 0, /* not used, set to 0 */
109 .auto_corr_min_ofdm = 80,
110 .auto_corr_min_ofdm_mrc = 128,
111 .auto_corr_min_ofdm_x1 = 105,
112 .auto_corr_min_ofdm_mrc_x1 = 192,
113
114 .auto_corr_max_ofdm = 145,
115 .auto_corr_max_ofdm_mrc = 232,
116 .auto_corr_max_ofdm_x1 = 110,
117 .auto_corr_max_ofdm_mrc_x1 = 232,
118
119 .auto_corr_min_cck = 125,
120 .auto_corr_max_cck = 175,
121 .auto_corr_min_cck_mrc = 160,
122 .auto_corr_max_cck_mrc = 310,
123 .nrg_th_cck = 97,
124 .nrg_th_ofdm = 100,
125
126 .barker_corr_th_min = 190,
127 .barker_corr_th_min_mrc = 390,
128 .nrg_th_cca = 62,
129};
130
131static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
132{
133 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
134 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
135 priv->cfg->base_params->num_of_queues =
136 priv->cfg->mod_params->num_of_queues;
137
138 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
139 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
140 priv->hw_params.scd_bc_tbls_size =
141 priv->cfg->base_params->num_of_queues *
142 sizeof(struct iwlagn_scd_bc_tbl);
143 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
144 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
145 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
146
147 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
148 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
149
150 priv->hw_params.max_bsm_size = 0;
151 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
152 BIT(IEEE80211_BAND_5GHZ);
153 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
154
155 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
156 if (priv->cfg->rx_with_siso_diversity)
157 priv->hw_params.rx_chains_num = 1;
158 else
159 priv->hw_params.rx_chains_num =
160 num_of_ant(priv->cfg->valid_rx_ant);
161 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
162 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
163
164 iwl2000_set_ct_threshold(priv);
165
166 /* Set initial sensitivity parameters */
167 /* Set initial calibration set */
168 priv->hw_params.sens = &iwl2000_sensitivity;
169 priv->hw_params.calib_init_cfg =
170 BIT(IWL_CALIB_XTAL) |
171 BIT(IWL_CALIB_LO) |
172 BIT(IWL_CALIB_TX_IQ) |
173 BIT(IWL_CALIB_BASE_BAND);
174 if (priv->cfg->need_dc_calib)
175 priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
176 if (priv->cfg->need_temp_offset_calib)
177 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
178
179 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
180
181 return 0;
182}
183
184static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
185 struct ieee80211_channel_switch *ch_switch)
186{
187 /*
188 * MULTI-FIXME
189 * See iwl_mac_channel_switch.
190 */
191 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
192 struct iwl6000_channel_switch_cmd cmd;
193 const struct iwl_channel_info *ch_info;
194 u32 switch_time_in_usec, ucode_switch_time;
195 u16 ch;
196 u32 tsf_low;
197 u8 switch_count;
198 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
199 struct ieee80211_vif *vif = ctx->vif;
200 struct iwl_host_cmd hcmd = {
201 .id = REPLY_CHANNEL_SWITCH,
202 .len = sizeof(cmd),
203 .flags = CMD_SYNC,
204 .data = &cmd,
205 };
206
207 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
208 ch = ch_switch->channel->hw_value;
209 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
210 ctx->active.channel, ch);
211 cmd.channel = cpu_to_le16(ch);
212 cmd.rxon_flags = ctx->staging.flags;
213 cmd.rxon_filter_flags = ctx->staging.filter_flags;
214 switch_count = ch_switch->count;
215 tsf_low = ch_switch->timestamp & 0x0ffffffff;
216 /*
217 * calculate the ucode channel switch time
218 * adding TSF as one of the factor for when to switch
219 */
220 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
221 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
222 beacon_interval)) {
223 switch_count -= (priv->ucode_beacon_time -
224 tsf_low) / beacon_interval;
225 } else
226 switch_count = 0;
227 }
228 if (switch_count <= 1)
229 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
230 else {
231 switch_time_in_usec =
232 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
233 ucode_switch_time = iwl_usecs_to_beacons(priv,
234 switch_time_in_usec,
235 beacon_interval);
236 cmd.switch_time = iwl_add_beacon_time(priv,
237 priv->ucode_beacon_time,
238 ucode_switch_time,
239 beacon_interval);
240 }
241 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
242 cmd.switch_time);
243 ch_info = iwl_get_channel_info(priv, priv->band, ch);
244 if (ch_info)
245 cmd.expect_beacon = is_channel_radar(ch_info);
246 else {
247 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
248 ctx->active.channel, ch);
249 return -EFAULT;
250 }
251 priv->switch_rxon.channel = cmd.channel;
252 priv->switch_rxon.switch_in_progress = true;
253
254 return iwl_send_cmd_sync(priv, &hcmd);
255}
256
257static struct iwl_lib_ops iwl2000_lib = {
258 .set_hw_params = iwl2000_hw_set_hw_params,
259 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
260 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
261 .txq_set_sched = iwlagn_txq_set_sched,
262 .txq_agg_enable = iwlagn_txq_agg_enable,
263 .txq_agg_disable = iwlagn_txq_agg_disable,
264 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
265 .txq_free_tfd = iwl_hw_txq_free_tfd,
266 .txq_init = iwl_hw_tx_queue_init,
267 .rx_handler_setup = iwlagn_rx_handler_setup,
268 .setup_deferred_work = iwlagn_bt_setup_deferred_work,
269 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
270 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
271 .load_ucode = iwlagn_load_ucode,
272 .dump_nic_event_log = iwl_dump_nic_event_log,
273 .dump_nic_error_log = iwl_dump_nic_error_log,
274 .dump_csr = iwl_dump_csr,
275 .dump_fh = iwl_dump_fh,
276 .init_alive_start = iwlagn_init_alive_start,
277 .alive_notify = iwlagn_alive_notify,
278 .send_tx_power = iwlagn_send_tx_power,
279 .update_chain_flags = iwl_update_chain_flags,
280 .set_channel_switch = iwl2030_hw_channel_switch,
281 .apm_ops = {
282 .init = iwl_apm_init,
283 .config = iwl2000_nic_config,
284 },
285 .eeprom_ops = {
286 .regulatory_bands = {
287 EEPROM_REG_BAND_1_CHANNELS,
288 EEPROM_REG_BAND_2_CHANNELS,
289 EEPROM_REG_BAND_3_CHANNELS,
290 EEPROM_REG_BAND_4_CHANNELS,
291 EEPROM_REG_BAND_5_CHANNELS,
292 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
293 EEPROM_REG_BAND_52_HT40_CHANNELS
294 },
295 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
296 .release_semaphore = iwlcore_eeprom_release_semaphore,
297 .calib_version = iwlagn_eeprom_calib_version,
298 .query_addr = iwlagn_eeprom_query_addr,
299 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
300 },
301 .isr_ops = {
302 .isr = iwl_isr_ict,
303 .free = iwl_free_isr_ict,
304 .alloc = iwl_alloc_isr_ict,
305 .reset = iwl_reset_ict,
306 .disable = iwl_disable_ict,
307 },
308 .temp_ops = {
309 .temperature = iwlagn_temperature,
310 },
311 .debugfs_ops = {
312 .rx_stats_read = iwl_ucode_rx_stats_read,
313 .tx_stats_read = iwl_ucode_tx_stats_read,
314 .general_stats_read = iwl_ucode_general_stats_read,
315 .bt_stats_read = iwl_ucode_bt_stats_read,
316 .reply_tx_error = iwl_reply_tx_error_read,
317 },
318 .txfifo_flush = iwlagn_txfifo_flush,
319 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
320 .tt_ops = {
321 .lower_power_detection = iwl_tt_is_low_power_state,
322 .tt_power_mode = iwl_tt_current_power_mode,
323 .ct_kill_check = iwl_check_for_ct_kill,
324 }
325};
326
327static const struct iwl_ops iwl2000_ops = {
328 .lib = &iwl2000_lib,
329 .hcmd = &iwlagn_hcmd,
330 .utils = &iwlagn_hcmd_utils,
331 .led = &iwlagn_led_ops,
332 .ieee80211_ops = &iwlagn_hw_ops,
333};
334
335static const struct iwl_ops iwl2030_ops = {
336 .lib = &iwl2000_lib,
337 .hcmd = &iwlagn_bt_hcmd,
338 .utils = &iwlagn_hcmd_utils,
339 .led = &iwlagn_led_ops,
340 .ieee80211_ops = &iwlagn_hw_ops,
341};
342
343static const struct iwl_ops iwl200_ops = {
344 .lib = &iwl2000_lib,
345 .hcmd = &iwlagn_hcmd,
346 .utils = &iwlagn_hcmd_utils,
347 .led = &iwlagn_led_ops,
348 .ieee80211_ops = &iwlagn_hw_ops,
349};
350
351static const struct iwl_ops iwl230_ops = {
352 .lib = &iwl2000_lib,
353 .hcmd = &iwlagn_bt_hcmd,
354 .utils = &iwlagn_hcmd_utils,
355 .led = &iwlagn_led_ops,
356 .ieee80211_ops = &iwlagn_hw_ops,
357};
358
359static struct iwl_base_params iwl2000_base_params = {
360 .eeprom_size = OTP_LOW_IMAGE_SIZE,
361 .num_of_queues = IWLAGN_NUM_QUEUES,
362 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
363 .pll_cfg_val = 0,
364 .set_l0s = true,
365 .use_bsm = false,
366 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
367 .shadow_ram_support = true,
368 .led_compensation = 51,
369 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
370 .adv_thermal_throttle = true,
371 .support_ct_kill_exit = true,
372 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
373 .chain_noise_scale = 1000,
374 .wd_timeout = IWL_DEF_WD_TIMEOUT,
375 .max_event_log_size = 512,
376 .ucode_tracing = true,
377 .sensitivity_calib_by_driver = true,
378 .chain_noise_calib_by_driver = true,
379 .shadow_reg_enable = true,
380};
381
382
383static struct iwl_base_params iwl2030_base_params = {
384 .eeprom_size = OTP_LOW_IMAGE_SIZE,
385 .num_of_queues = IWLAGN_NUM_QUEUES,
386 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
387 .pll_cfg_val = 0,
388 .set_l0s = true,
389 .use_bsm = false,
390 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
391 .shadow_ram_support = true,
392 .led_compensation = 57,
393 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
394 .adv_thermal_throttle = true,
395 .support_ct_kill_exit = true,
396 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
397 .chain_noise_scale = 1000,
398 .wd_timeout = IWL_LONG_WD_TIMEOUT,
399 .max_event_log_size = 512,
400 .ucode_tracing = true,
401 .sensitivity_calib_by_driver = true,
402 .chain_noise_calib_by_driver = true,
403 .shadow_reg_enable = true,
404};
405
406static struct iwl_ht_params iwl2000_ht_params = {
407 .ht_greenfield_support = true,
408 .use_rts_for_aggregation = true, /* use rts/cts protection */
409};
410
411static struct iwl_bt_params iwl2030_bt_params = {
412 .bt_statistics = true,
413 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
414 .advanced_bt_coexist = true,
415 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
416 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
417 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
418 .bt_sco_disable = true,
419 .bt_session_2 = true,
420};
421
422#define IWL_DEVICE_2000 \
423 .fw_name_pre = IWL2000_FW_PRE, \
424 .ucode_api_max = IWL2000_UCODE_API_MAX, \
425 .ucode_api_min = IWL2000_UCODE_API_MIN, \
426 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
427 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
428 .ops = &iwl2000_ops, \
429 .mod_params = &iwlagn_mod_params, \
430 .base_params = &iwl2000_base_params, \
431 .need_dc_calib = true, \
432 .need_temp_offset_calib = true, \
433 .led_mode = IWL_LED_RF_STATE, \
434 .iq_invert = true \
435
436struct iwl_cfg iwl2000_2bgn_cfg = {
437 .name = "2000 Series 2x2 BGN",
438 IWL_DEVICE_2000,
439 .ht_params = &iwl2000_ht_params,
440};
441
442struct iwl_cfg iwl2000_2bg_cfg = {
443 .name = "2000 Series 2x2 BG",
444 IWL_DEVICE_2000,
445};
446
447#define IWL_DEVICE_2030 \
448 .fw_name_pre = IWL2030_FW_PRE, \
449 .ucode_api_max = IWL2030_UCODE_API_MAX, \
450 .ucode_api_min = IWL2030_UCODE_API_MIN, \
451 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
452 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
453 .ops = &iwl2030_ops, \
454 .mod_params = &iwlagn_mod_params, \
455 .base_params = &iwl2030_base_params, \
456 .bt_params = &iwl2030_bt_params, \
457 .need_dc_calib = true, \
458 .need_temp_offset_calib = true, \
459 .led_mode = IWL_LED_RF_STATE, \
460 .adv_pm = true, \
461 .iq_invert = true \
462
463struct iwl_cfg iwl2030_2bgn_cfg = {
464 .name = "2000 Series 2x2 BGN/BT",
465 IWL_DEVICE_2030,
466 .ht_params = &iwl2000_ht_params,
467};
468
469struct iwl_cfg iwl2030_2bg_cfg = {
470 .name = "2000 Series 2x2 BG/BT",
471 IWL_DEVICE_2030,
472};
473
474#define IWL_DEVICE_6035 \
475 .fw_name_pre = IWL2030_FW_PRE, \
476 .ucode_api_max = IWL2030_UCODE_API_MAX, \
477 .ucode_api_min = IWL2030_UCODE_API_MIN, \
478 .eeprom_ver = EEPROM_6035_EEPROM_VERSION, \
479 .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION, \
480 .ops = &iwl2030_ops, \
481 .mod_params = &iwlagn_mod_params, \
482 .base_params = &iwl2030_base_params, \
483 .bt_params = &iwl2030_bt_params, \
484 .need_dc_calib = true, \
485 .need_temp_offset_calib = true, \
486 .led_mode = IWL_LED_RF_STATE, \
487 .adv_pm = true \
488
489struct iwl_cfg iwl6035_2agn_cfg = {
490 .name = "2000 Series 2x2 AGN/BT",
491 IWL_DEVICE_6035,
492 .ht_params = &iwl2000_ht_params,
493};
494
495struct iwl_cfg iwl6035_2abg_cfg = {
496 .name = "2000 Series 2x2 ABG/BT",
497 IWL_DEVICE_6035,
498};
499
500struct iwl_cfg iwl6035_2bg_cfg = {
501 .name = "2000 Series 2x2 BG/BT",
502 IWL_DEVICE_6035,
503};
504
505#define IWL_DEVICE_200 \
506 .fw_name_pre = IWL200_FW_PRE, \
507 .ucode_api_max = IWL200_UCODE_API_MAX, \
508 .ucode_api_min = IWL200_UCODE_API_MIN, \
509 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
510 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
511 .ops = &iwl200_ops, \
512 .mod_params = &iwlagn_mod_params, \
513 .base_params = &iwl2000_base_params, \
514 .need_dc_calib = true, \
515 .need_temp_offset_calib = true, \
516 .led_mode = IWL_LED_RF_STATE, \
517 .adv_pm = true, \
518 .rx_with_siso_diversity = true \
519
520struct iwl_cfg iwl200_bg_cfg = {
521 .name = "200 Series 1x1 BG",
522 IWL_DEVICE_200,
523};
524
525struct iwl_cfg iwl200_bgn_cfg = {
526 .name = "200 Series 1x1 BGN",
527 IWL_DEVICE_200,
528 .ht_params = &iwl2000_ht_params,
529};
530
531#define IWL_DEVICE_230 \
532 .fw_name_pre = IWL200_FW_PRE, \
533 .ucode_api_max = IWL200_UCODE_API_MAX, \
534 .ucode_api_min = IWL200_UCODE_API_MIN, \
535 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
536 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
537 .ops = &iwl230_ops, \
538 .mod_params = &iwlagn_mod_params, \
539 .base_params = &iwl2030_base_params, \
540 .bt_params = &iwl2030_bt_params, \
541 .need_dc_calib = true, \
542 .need_temp_offset_calib = true, \
543 .led_mode = IWL_LED_RF_STATE, \
544 .adv_pm = true, \
545 .rx_with_siso_diversity = true \
546
547struct iwl_cfg iwl230_bg_cfg = {
548 .name = "200 Series 1x1 BG/BT",
549 IWL_DEVICE_230,
550};
551
552struct iwl_cfg iwl230_bgn_cfg = {
553 .name = "200 Series 1x1 BGN/BT",
554 IWL_DEVICE_230,
555 .ht_params = &iwl2000_ht_params,
556};
557
558MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
559MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
560MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 537fb8c84e3a..3ea31b659d1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -402,8 +402,6 @@ static struct iwl_lib_ops iwl5000_lib = {
402 .bt_stats_read = iwl_ucode_bt_stats_read, 402 .bt_stats_read = iwl_ucode_bt_stats_read,
403 .reply_tx_error = iwl_reply_tx_error_read, 403 .reply_tx_error = iwl_reply_tx_error_read,
404 }, 404 },
405 .check_plcp_health = iwl_good_plcp_health,
406 .check_ack_health = iwl_good_ack_health,
407 .txfifo_flush = iwlagn_txfifo_flush, 405 .txfifo_flush = iwlagn_txfifo_flush,
408 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 406 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
409 .tt_ops = { 407 .tt_ops = {
@@ -471,8 +469,6 @@ static struct iwl_lib_ops iwl5150_lib = {
471 .bt_stats_read = iwl_ucode_bt_stats_read, 469 .bt_stats_read = iwl_ucode_bt_stats_read,
472 .reply_tx_error = iwl_reply_tx_error_read, 470 .reply_tx_error = iwl_reply_tx_error_read,
473 }, 471 },
474 .check_plcp_health = iwl_good_plcp_health,
475 .check_ack_health = iwl_good_ack_health,
476 .txfifo_flush = iwlagn_txfifo_flush, 472 .txfifo_flush = iwlagn_txfifo_flush,
477 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 473 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
478 .tt_ops = { 474 .tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index ef36aff1bb43..a745b01c0ec1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -67,13 +67,13 @@
67#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode" 67#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
68#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api) 68#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
69 69
70#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-" 70#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
71#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode" 71#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
72#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api) 72#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
73 73
74#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-" 74#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
75#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode" 75#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
76#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api) 76#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
77 77
78static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 78static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
79{ 79{
@@ -90,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
90 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 90 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
91} 91}
92 92
93static void iwl6050g2_additional_nic_config(struct iwl_priv *priv) 93static void iwl6150_additional_nic_config(struct iwl_priv *priv)
94{ 94{
95 /* Indicate calibration version to uCode. */ 95 /* Indicate calibration version to uCode. */
96 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6) 96 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@ -343,8 +343,6 @@ static struct iwl_lib_ops iwl6000_lib = {
343 .bt_stats_read = iwl_ucode_bt_stats_read, 343 .bt_stats_read = iwl_ucode_bt_stats_read,
344 .reply_tx_error = iwl_reply_tx_error_read, 344 .reply_tx_error = iwl_reply_tx_error_read,
345 }, 345 },
346 .check_plcp_health = iwl_good_plcp_health,
347 .check_ack_health = iwl_good_ack_health,
348 .txfifo_flush = iwlagn_txfifo_flush, 346 .txfifo_flush = iwlagn_txfifo_flush,
349 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 347 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
350 .tt_ops = { 348 .tt_ops = {
@@ -354,7 +352,7 @@ static struct iwl_lib_ops iwl6000_lib = {
354 } 352 }
355}; 353};
356 354
357static struct iwl_lib_ops iwl6000g2b_lib = { 355static struct iwl_lib_ops iwl6030_lib = {
358 .set_hw_params = iwl6000_hw_set_hw_params, 356 .set_hw_params = iwl6000_hw_set_hw_params,
359 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, 357 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
360 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, 358 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@ -415,8 +413,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
415 .bt_stats_read = iwl_ucode_bt_stats_read, 413 .bt_stats_read = iwl_ucode_bt_stats_read,
416 .reply_tx_error = iwl_reply_tx_error_read, 414 .reply_tx_error = iwl_reply_tx_error_read,
417 }, 415 },
418 .check_plcp_health = iwl_good_plcp_health,
419 .check_ack_health = iwl_good_ack_health,
420 .txfifo_flush = iwlagn_txfifo_flush, 416 .txfifo_flush = iwlagn_txfifo_flush,
421 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 417 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
422 .tt_ops = { 418 .tt_ops = {
@@ -430,8 +426,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = {
430 .additional_nic_config = &iwl6050_additional_nic_config, 426 .additional_nic_config = &iwl6050_additional_nic_config,
431}; 427};
432 428
433static struct iwl_nic_ops iwl6050g2_nic_ops = { 429static struct iwl_nic_ops iwl6150_nic_ops = {
434 .additional_nic_config = &iwl6050g2_additional_nic_config, 430 .additional_nic_config = &iwl6150_additional_nic_config,
435}; 431};
436 432
437static const struct iwl_ops iwl6000_ops = { 433static const struct iwl_ops iwl6000_ops = {
@@ -451,17 +447,17 @@ static const struct iwl_ops iwl6050_ops = {
451 .ieee80211_ops = &iwlagn_hw_ops, 447 .ieee80211_ops = &iwlagn_hw_ops,
452}; 448};
453 449
454static const struct iwl_ops iwl6050g2_ops = { 450static const struct iwl_ops iwl6150_ops = {
455 .lib = &iwl6000_lib, 451 .lib = &iwl6000_lib,
456 .hcmd = &iwlagn_hcmd, 452 .hcmd = &iwlagn_hcmd,
457 .utils = &iwlagn_hcmd_utils, 453 .utils = &iwlagn_hcmd_utils,
458 .led = &iwlagn_led_ops, 454 .led = &iwlagn_led_ops,
459 .nic = &iwl6050g2_nic_ops, 455 .nic = &iwl6150_nic_ops,
460 .ieee80211_ops = &iwlagn_hw_ops, 456 .ieee80211_ops = &iwlagn_hw_ops,
461}; 457};
462 458
463static const struct iwl_ops iwl6000g2b_ops = { 459static const struct iwl_ops iwl6030_ops = {
464 .lib = &iwl6000g2b_lib, 460 .lib = &iwl6030_lib,
465 .hcmd = &iwlagn_bt_hcmd, 461 .hcmd = &iwlagn_bt_hcmd,
466 .utils = &iwlagn_hcmd_utils, 462 .utils = &iwlagn_hcmd_utils,
467 .led = &iwlagn_led_ops, 463 .led = &iwlagn_led_ops,
@@ -479,7 +475,6 @@ static struct iwl_base_params iwl6000_base_params = {
479 .shadow_ram_support = true, 475 .shadow_ram_support = true,
480 .led_compensation = 51, 476 .led_compensation = 51,
481 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 477 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
482 .supports_idle = true,
483 .adv_thermal_throttle = true, 478 .adv_thermal_throttle = true,
484 .support_ct_kill_exit = true, 479 .support_ct_kill_exit = true,
485 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 480 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -503,7 +498,6 @@ static struct iwl_base_params iwl6050_base_params = {
503 .shadow_ram_support = true, 498 .shadow_ram_support = true,
504 .led_compensation = 51, 499 .led_compensation = 51,
505 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 500 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
506 .supports_idle = true,
507 .adv_thermal_throttle = true, 501 .adv_thermal_throttle = true,
508 .support_ct_kill_exit = true, 502 .support_ct_kill_exit = true,
509 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 503 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -526,7 +520,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
526 .shadow_ram_support = true, 520 .shadow_ram_support = true,
527 .led_compensation = 57, 521 .led_compensation = 57,
528 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 522 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
529 .supports_idle = true,
530 .adv_thermal_throttle = true, 523 .adv_thermal_throttle = true,
531 .support_ct_kill_exit = true, 524 .support_ct_kill_exit = true,
532 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 525 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -555,11 +548,11 @@ static struct iwl_bt_params iwl6000_bt_params = {
555}; 548};
556 549
557#define IWL_DEVICE_6005 \ 550#define IWL_DEVICE_6005 \
558 .fw_name_pre = IWL6000G2A_FW_PRE, \ 551 .fw_name_pre = IWL6005_FW_PRE, \
559 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 552 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
560 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 553 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
561 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \ 554 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
562 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \ 555 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
563 .ops = &iwl6000_ops, \ 556 .ops = &iwl6000_ops, \
564 .mod_params = &iwlagn_mod_params, \ 557 .mod_params = &iwlagn_mod_params, \
565 .base_params = &iwl6000_g2_base_params, \ 558 .base_params = &iwl6000_g2_base_params, \
@@ -584,12 +577,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
584}; 577};
585 578
586#define IWL_DEVICE_6030 \ 579#define IWL_DEVICE_6030 \
587 .fw_name_pre = IWL6000G2B_FW_PRE, \ 580 .fw_name_pre = IWL6030_FW_PRE, \
588 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 581 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
589 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 582 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
590 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \ 583 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
591 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \ 584 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
592 .ops = &iwl6000g2b_ops, \ 585 .ops = &iwl6030_ops, \
593 .mod_params = &iwlagn_mod_params, \ 586 .mod_params = &iwlagn_mod_params, \
594 .base_params = &iwl6000_g2_base_params, \ 587 .base_params = &iwl6000_g2_base_params, \
595 .bt_params = &iwl6000_bt_params, \ 588 .bt_params = &iwl6000_bt_params, \
@@ -708,9 +701,9 @@ struct iwl_cfg iwl6150_bgn_cfg = {
708 .fw_name_pre = IWL6050_FW_PRE, 701 .fw_name_pre = IWL6050_FW_PRE,
709 .ucode_api_max = IWL6050_UCODE_API_MAX, 702 .ucode_api_max = IWL6050_UCODE_API_MAX,
710 .ucode_api_min = IWL6050_UCODE_API_MIN, 703 .ucode_api_min = IWL6050_UCODE_API_MIN,
711 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION, 704 .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
712 .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION, 705 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
713 .ops = &iwl6050g2_ops, 706 .ops = &iwl6150_ops,
714 .mod_params = &iwlagn_mod_params, 707 .mod_params = &iwlagn_mod_params,
715 .base_params = &iwl6050_base_params, 708 .base_params = &iwl6050_base_params,
716 .ht_params = &iwl6000_ht_params, 709 .ht_params = &iwl6000_ht_params,
@@ -736,5 +729,5 @@ struct iwl_cfg iwl6000_3agn_cfg = {
736 729
737MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 730MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
738MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 731MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
739MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 732MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
740MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 733MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index d16bb5ede014..9006293e740c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -631,8 +631,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
631 } 631 }
632 632
633 spin_lock_irqsave(&priv->lock, flags); 633 spin_lock_irqsave(&priv->lock, flags);
634 if (priv->cfg->bt_params && 634 if (iwl_bt_statistics(priv)) {
635 priv->cfg->bt_params->bt_statistics) {
636 rx_info = &(((struct iwl_bt_notif_statistics *)resp)-> 635 rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
637 rx.general.common); 636 rx.general.common);
638 ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm); 637 ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
@@ -897,8 +896,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
897 } 896 }
898 897
899 spin_lock_irqsave(&priv->lock, flags); 898 spin_lock_irqsave(&priv->lock, flags);
900 if (priv->cfg->bt_params && 899 if (iwl_bt_statistics(priv)) {
901 priv->cfg->bt_params->bt_statistics) {
902 rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)-> 900 rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
903 rx.general.common); 901 rx.general.common);
904 } else { 902 } else {
@@ -913,8 +911,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
913 911
914 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); 912 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
915 rxon_chnum = le16_to_cpu(ctx->staging.channel); 913 rxon_chnum = le16_to_cpu(ctx->staging.channel);
916 if (priv->cfg->bt_params && 914 if (iwl_bt_statistics(priv)) {
917 priv->cfg->bt_params->bt_statistics) {
918 stat_band24 = !!(((struct iwl_bt_notif_statistics *) 915 stat_band24 = !!(((struct iwl_bt_notif_statistics *)
919 stat_resp)->flag & 916 stat_resp)->flag &
920 STATISTICS_REPLY_FLG_BAND_24G_MSK); 917 STATISTICS_REPLY_FLG_BAND_24G_MSK);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index a6dbd8983dac..b500aaae53ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -39,8 +39,7 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
39 int p = 0; 39 int p = 0;
40 u32 flag; 40 u32 flag;
41 41
42 if (priv->cfg->bt_params && 42 if (iwl_bt_statistics(priv))
43 priv->cfg->bt_params->bt_statistics)
44 flag = le32_to_cpu(priv->_agn.statistics_bt.flag); 43 flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
45 else 44 else
46 flag = le32_to_cpu(priv->_agn.statistics.flag); 45 flag = le32_to_cpu(priv->_agn.statistics.flag);
@@ -89,8 +88,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
89 * the last statistics notification from uCode 88 * the last statistics notification from uCode
90 * might not reflect the current uCode activity 89 * might not reflect the current uCode activity
91 */ 90 */
92 if (priv->cfg->bt_params && 91 if (iwl_bt_statistics(priv)) {
93 priv->cfg->bt_params->bt_statistics) {
94 ofdm = &priv->_agn.statistics_bt.rx.ofdm; 92 ofdm = &priv->_agn.statistics_bt.rx.ofdm;
95 cck = &priv->_agn.statistics_bt.rx.cck; 93 cck = &priv->_agn.statistics_bt.rx.cck;
96 general = &priv->_agn.statistics_bt.rx.general.common; 94 general = &priv->_agn.statistics_bt.rx.general.common;
@@ -536,8 +534,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
536 * the last statistics notification from uCode 534 * the last statistics notification from uCode
537 * might not reflect the current uCode activity 535 * might not reflect the current uCode activity
538 */ 536 */
539 if (priv->cfg->bt_params && 537 if (iwl_bt_statistics(priv)) {
540 priv->cfg->bt_params->bt_statistics) {
541 tx = &priv->_agn.statistics_bt.tx; 538 tx = &priv->_agn.statistics_bt.tx;
542 accum_tx = &priv->_agn.accum_statistics_bt.tx; 539 accum_tx = &priv->_agn.accum_statistics_bt.tx;
543 delta_tx = &priv->_agn.delta_statistics_bt.tx; 540 delta_tx = &priv->_agn.delta_statistics_bt.tx;
@@ -737,8 +734,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
737 * the last statistics notification from uCode 734 * the last statistics notification from uCode
738 * might not reflect the current uCode activity 735 * might not reflect the current uCode activity
739 */ 736 */
740 if (priv->cfg->bt_params && 737 if (iwl_bt_statistics(priv)) {
741 priv->cfg->bt_params->bt_statistics) {
742 general = &priv->_agn.statistics_bt.general.common; 738 general = &priv->_agn.statistics_bt.general.common;
743 dbg = &priv->_agn.statistics_bt.general.common.dbg; 739 dbg = &priv->_agn.statistics_bt.general.common.dbg;
744 div = &priv->_agn.statistics_bt.general.common.div; 740 div = &priv->_agn.statistics_bt.general.common.div;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 366340f3fb0f..41543ad4cb84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -305,7 +305,11 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
305 cmd.slots[0].type = 0; /* BSS */ 305 cmd.slots[0].type = 0; /* BSS */
306 cmd.slots[1].type = 1; /* PAN */ 306 cmd.slots[1].type = 1; /* PAN */
307 307
308 if (ctx_bss->vif && ctx_pan->vif) { 308 if (priv->_agn.hw_roc_channel) {
309 /* both contexts must be used for this to happen */
310 slot1 = priv->_agn.hw_roc_duration;
311 slot0 = IWL_MIN_SLOT_TIME;
312 } else if (ctx_bss->vif && ctx_pan->vif) {
309 int bcnint = ctx_pan->vif->bss_conf.beacon_int; 313 int bcnint = ctx_pan->vif->bss_conf.beacon_int;
310 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; 314 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
311 315
@@ -330,12 +334,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
330 if (test_bit(STATUS_SCAN_HW, &priv->status) || 334 if (test_bit(STATUS_SCAN_HW, &priv->status) ||
331 (!ctx_bss->vif->bss_conf.idle && 335 (!ctx_bss->vif->bss_conf.idle &&
332 !ctx_bss->vif->bss_conf.assoc)) { 336 !ctx_bss->vif->bss_conf.assoc)) {
333 slot0 = dtim * bcnint * 3 - 20; 337 slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
334 slot1 = 20; 338 slot1 = IWL_MIN_SLOT_TIME;
335 } else if (!ctx_pan->vif->bss_conf.idle && 339 } else if (!ctx_pan->vif->bss_conf.idle &&
336 !ctx_pan->vif->bss_conf.assoc) { 340 !ctx_pan->vif->bss_conf.assoc) {
337 slot1 = bcnint * 3 - 20; 341 slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
338 slot0 = 20; 342 slot0 = IWL_MIN_SLOT_TIME;
339 } 343 }
340 } else if (ctx_pan->vif) { 344 } else if (ctx_pan->vif) {
341 slot0 = 0; 345 slot0 = 0;
@@ -344,8 +348,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
344 slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); 348 slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
345 349
346 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 350 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
347 slot0 = slot1 * 3 - 20; 351 slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
348 slot1 = 20; 352 slot1 = IWL_MIN_SLOT_TIME;
349 } 353 }
350 } 354 }
351 355
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 1a24946bc203..c1190d965614 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -63,23 +63,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
63} 63}
64 64
65/* Set led register off */ 65/* Set led register off */
66static int iwl_led_on_reg(struct iwl_priv *priv) 66void iwlagn_led_enable(struct iwl_priv *priv)
67{ 67{
68 IWL_DEBUG_LED(priv, "led on\n");
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); 68 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70 return 0;
71}
72
73/* Set led register off */
74static int iwl_led_off_reg(struct iwl_priv *priv)
75{
76 IWL_DEBUG_LED(priv, "LED Reg off\n");
77 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
78 return 0;
79} 69}
80 70
81const struct iwl_led_ops iwlagn_led_ops = { 71const struct iwl_led_ops iwlagn_led_ops = {
82 .cmd = iwl_send_led_cmd, 72 .cmd = iwl_send_led_cmd,
83 .on = iwl_led_on_reg,
84 .off = iwl_led_off_reg,
85}; 73};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index a594e4fdc6b8..96f323dc5dd6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -28,5 +28,6 @@
28#define __iwl_agn_led_h__ 28#define __iwl_agn_led_h__
29 29
30extern const struct iwl_led_ops iwlagn_led_ops; 30extern const struct iwl_led_ops iwlagn_led_ops;
31void iwlagn_led_enable(struct iwl_priv *priv);
31 32
32#endif /* __iwl_agn_led_h__ */ 33#endif /* __iwl_agn_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 3dee87e8f55d..2003c1d4295f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -473,6 +473,11 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
473 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = 473 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
474 iwlagn_rx_calib_complete; 474 iwlagn_rx_calib_complete;
475 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; 475 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
476
477 /* set up notification wait support */
478 spin_lock_init(&priv->_agn.notif_wait_lock);
479 INIT_LIST_HEAD(&priv->_agn.notif_waits);
480 init_waitqueue_head(&priv->_agn.notif_waitq);
476} 481}
477 482
478void iwlagn_setup_deferred_work(struct iwl_priv *priv) 483void iwlagn_setup_deferred_work(struct iwl_priv *priv)
@@ -528,9 +533,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
528 533
529void iwlagn_temperature(struct iwl_priv *priv) 534void iwlagn_temperature(struct iwl_priv *priv)
530{ 535{
531 /* store temperature from statistics (in Celsius) */ 536 /* store temperature from correct statistics (in Celsius) */
532 priv->temperature = 537 priv->temperature = le32_to_cpu((iwl_bt_statistics(priv)) ?
533 le32_to_cpu(priv->_agn.statistics.general.common.temperature); 538 priv->_agn.statistics_bt.general.common.temperature :
539 priv->_agn.statistics.general.common.temperature);
534 iwl_tt_handler(priv); 540 iwl_tt_handler(priv);
535} 541}
536 542
@@ -604,6 +610,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
604struct iwl_mod_params iwlagn_mod_params = { 610struct iwl_mod_params iwlagn_mod_params = {
605 .amsdu_size_8K = 1, 611 .amsdu_size_8K = 1,
606 .restart_fw = 1, 612 .restart_fw = 1,
613 .plcp_check = true,
607 /* the rest are 0 by default */ 614 /* the rest are 0 by default */
608}; 615};
609 616
@@ -988,240 +995,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
988 return -1; 995 return -1;
989} 996}
990 997
991/* Calc max signal level (dBm) among 3 possible receivers */
992static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
993 struct iwl_rx_phy_res *rx_resp)
994{
995 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
996}
997
998static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
999{
1000 u32 decrypt_out = 0;
1001
1002 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
1003 RX_RES_STATUS_STATION_FOUND)
1004 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
1005 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
1006
1007 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
1008
1009 /* packet was not encrypted */
1010 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
1011 RX_RES_STATUS_SEC_TYPE_NONE)
1012 return decrypt_out;
1013
1014 /* packet was encrypted with unknown alg */
1015 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
1016 RX_RES_STATUS_SEC_TYPE_ERR)
1017 return decrypt_out;
1018
1019 /* decryption was not done in HW */
1020 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
1021 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
1022 return decrypt_out;
1023
1024 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
1025
1026 case RX_RES_STATUS_SEC_TYPE_CCMP:
1027 /* alg is CCM: check MIC only */
1028 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
1029 /* Bad MIC */
1030 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
1031 else
1032 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
1033
1034 break;
1035
1036 case RX_RES_STATUS_SEC_TYPE_TKIP:
1037 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
1038 /* Bad TTAK */
1039 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
1040 break;
1041 }
1042 /* fall through if TTAK OK */
1043 default:
1044 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
1045 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
1046 else
1047 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
1048 break;
1049 }
1050
1051 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
1052 decrypt_in, decrypt_out);
1053
1054 return decrypt_out;
1055}
1056
1057static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
1058 struct ieee80211_hdr *hdr,
1059 u16 len,
1060 u32 ampdu_status,
1061 struct iwl_rx_mem_buffer *rxb,
1062 struct ieee80211_rx_status *stats)
1063{
1064 struct sk_buff *skb;
1065 __le16 fc = hdr->frame_control;
1066
1067 /* We only process data packets if the interface is open */
1068 if (unlikely(!priv->is_open)) {
1069 IWL_DEBUG_DROP_LIMIT(priv,
1070 "Dropping packet while interface is not open.\n");
1071 return;
1072 }
1073
1074 /* In case of HW accelerated crypto and bad decryption, drop */
1075 if (!priv->cfg->mod_params->sw_crypto &&
1076 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
1077 return;
1078
1079 skb = dev_alloc_skb(128);
1080 if (!skb) {
1081 IWL_ERR(priv, "dev_alloc_skb failed\n");
1082 return;
1083 }
1084
1085 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
1086
1087 iwl_update_stats(priv, false, fc, len);
1088 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
1089
1090 ieee80211_rx(priv->hw, skb);
1091 priv->alloc_rxb_page--;
1092 rxb->page = NULL;
1093}
1094
1095/* Called for REPLY_RX (legacy ABG frames), or
1096 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
1097void iwlagn_rx_reply_rx(struct iwl_priv *priv,
1098 struct iwl_rx_mem_buffer *rxb)
1099{
1100 struct ieee80211_hdr *header;
1101 struct ieee80211_rx_status rx_status;
1102 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1103 struct iwl_rx_phy_res *phy_res;
1104 __le32 rx_pkt_status;
1105 struct iwl_rx_mpdu_res_start *amsdu;
1106 u32 len;
1107 u32 ampdu_status;
1108 u32 rate_n_flags;
1109
1110 /**
1111 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
1112 * REPLY_RX: physical layer info is in this buffer
1113 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
1114 * command and cached in priv->last_phy_res
1115 *
1116 * Here we set up local variables depending on which command is
1117 * received.
1118 */
1119 if (pkt->hdr.cmd == REPLY_RX) {
1120 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
1121 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
1122 + phy_res->cfg_phy_cnt);
1123
1124 len = le16_to_cpu(phy_res->byte_count);
1125 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
1126 phy_res->cfg_phy_cnt + len);
1127 ampdu_status = le32_to_cpu(rx_pkt_status);
1128 } else {
1129 if (!priv->_agn.last_phy_res_valid) {
1130 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1131 return;
1132 }
1133 phy_res = &priv->_agn.last_phy_res;
1134 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
1135 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1136 len = le16_to_cpu(amsdu->byte_count);
1137 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1138 ampdu_status = iwlagn_translate_rx_status(priv,
1139 le32_to_cpu(rx_pkt_status));
1140 }
1141
1142 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
1143 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
1144 phy_res->cfg_phy_cnt);
1145 return;
1146 }
1147
1148 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1149 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1150 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1151 le32_to_cpu(rx_pkt_status));
1152 return;
1153 }
1154
1155 /* This will be used in several places later */
1156 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1157
1158 /* rx_status carries information about the packet to mac80211 */
1159 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1160 rx_status.freq =
1161 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
1162 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1163 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1164 rx_status.rate_idx =
1165 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1166 rx_status.flag = 0;
1167
1168 /* TSF isn't reliable. In order to allow smooth user experience,
1169 * this W/A doesn't propagate it to the mac80211 */
1170 /*rx_status.flag |= RX_FLAG_TSFT;*/
1171
1172 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1173
1174 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1175 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
1176
1177 iwl_dbg_log_rx_data_frame(priv, len, header);
1178 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1179 rx_status.signal, (unsigned long long)rx_status.mactime);
1180
1181 /*
1182 * "antenna number"
1183 *
1184 * It seems that the antenna field in the phy flags value
1185 * is actually a bit field. This is undefined by radiotap,
1186 * it wants an actual antenna number but I always get "7"
1187 * for most legacy frames I receive indicating that the
1188 * same frame was received on all three RX chains.
1189 *
1190 * I think this field should be removed in favor of a
1191 * new 802.11n radiotap field "RX chains" that is defined
1192 * as a bitmask.
1193 */
1194 rx_status.antenna =
1195 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1196 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1197
1198 /* set the preamble flag if appropriate */
1199 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1200 rx_status.flag |= RX_FLAG_SHORTPRE;
1201
1202 /* Set up the HT phy flags */
1203 if (rate_n_flags & RATE_MCS_HT_MSK)
1204 rx_status.flag |= RX_FLAG_HT;
1205 if (rate_n_flags & RATE_MCS_HT40_MSK)
1206 rx_status.flag |= RX_FLAG_40MHZ;
1207 if (rate_n_flags & RATE_MCS_SGI_MSK)
1208 rx_status.flag |= RX_FLAG_SHORT_GI;
1209
1210 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1211 rxb, &rx_status);
1212}
1213
1214/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1215 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1216void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
1217 struct iwl_rx_mem_buffer *rxb)
1218{
1219 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1220 priv->_agn.last_phy_res_valid = true;
1221 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
1222 sizeof(struct iwl_rx_phy_res));
1223}
1224
1225static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, 998static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
1226 struct ieee80211_vif *vif, 999 struct ieee80211_vif *vif,
1227 enum ieee80211_band band, 1000 enum ieee80211_band band,
@@ -1342,6 +1115,18 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
1342 return added; 1115 return added;
1343} 1116}
1344 1117
1118static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
1119{
1120 struct sk_buff *skb = priv->_agn.offchan_tx_skb;
1121
1122 if (skb->len < maxlen)
1123 maxlen = skb->len;
1124
1125 memcpy(data, skb->data, maxlen);
1126
1127 return maxlen;
1128}
1129
1345int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) 1130int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1346{ 1131{
1347 struct iwl_host_cmd cmd = { 1132 struct iwl_host_cmd cmd = {
@@ -1384,20 +1169,25 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1384 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 1169 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
1385 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 1170 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
1386 1171
1387 if (iwl_is_any_associated(priv)) { 1172 if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
1173 iwl_is_any_associated(priv)) {
1388 u16 interval = 0; 1174 u16 interval = 0;
1389 u32 extra; 1175 u32 extra;
1390 u32 suspend_time = 100; 1176 u32 suspend_time = 100;
1391 u32 scan_suspend_time = 100; 1177 u32 scan_suspend_time = 100;
1392 unsigned long flags;
1393 1178
1394 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 1179 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
1395 spin_lock_irqsave(&priv->lock, flags); 1180 switch (priv->scan_type) {
1396 if (priv->is_internal_short_scan) 1181 case IWL_SCAN_OFFCH_TX:
1182 WARN_ON(1);
1183 break;
1184 case IWL_SCAN_RADIO_RESET:
1397 interval = 0; 1185 interval = 0;
1398 else 1186 break;
1187 case IWL_SCAN_NORMAL:
1399 interval = vif->bss_conf.beacon_int; 1188 interval = vif->bss_conf.beacon_int;
1400 spin_unlock_irqrestore(&priv->lock, flags); 1189 break;
1190 }
1401 1191
1402 scan->suspend_time = 0; 1192 scan->suspend_time = 0;
1403 scan->max_out_time = cpu_to_le32(200 * 1024); 1193 scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1410,29 +1200,41 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1410 scan->suspend_time = cpu_to_le32(scan_suspend_time); 1200 scan->suspend_time = cpu_to_le32(scan_suspend_time);
1411 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", 1201 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
1412 scan_suspend_time, interval); 1202 scan_suspend_time, interval);
1203 } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
1204 scan->suspend_time = 0;
1205 scan->max_out_time =
1206 cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
1413 } 1207 }
1414 1208
1415 if (priv->is_internal_short_scan) { 1209 switch (priv->scan_type) {
1210 case IWL_SCAN_RADIO_RESET:
1416 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); 1211 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
1417 } else if (priv->scan_request->n_ssids) { 1212 break;
1418 int i, p = 0; 1213 case IWL_SCAN_NORMAL:
1419 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); 1214 if (priv->scan_request->n_ssids) {
1420 for (i = 0; i < priv->scan_request->n_ssids; i++) { 1215 int i, p = 0;
1421 /* always does wildcard anyway */ 1216 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
1422 if (!priv->scan_request->ssids[i].ssid_len) 1217 for (i = 0; i < priv->scan_request->n_ssids; i++) {
1423 continue; 1218 /* always does wildcard anyway */
1424 scan->direct_scan[p].id = WLAN_EID_SSID; 1219 if (!priv->scan_request->ssids[i].ssid_len)
1425 scan->direct_scan[p].len = 1220 continue;
1426 priv->scan_request->ssids[i].ssid_len; 1221 scan->direct_scan[p].id = WLAN_EID_SSID;
1427 memcpy(scan->direct_scan[p].ssid, 1222 scan->direct_scan[p].len =
1428 priv->scan_request->ssids[i].ssid, 1223 priv->scan_request->ssids[i].ssid_len;
1429 priv->scan_request->ssids[i].ssid_len); 1224 memcpy(scan->direct_scan[p].ssid,
1430 n_probes++; 1225 priv->scan_request->ssids[i].ssid,
1431 p++; 1226 priv->scan_request->ssids[i].ssid_len);
1432 } 1227 n_probes++;
1433 is_active = true; 1228 p++;
1434 } else 1229 }
1435 IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); 1230 is_active = true;
1231 } else
1232 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
1233 break;
1234 case IWL_SCAN_OFFCH_TX:
1235 IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
1236 break;
1237 }
1436 1238
1437 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 1239 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
1438 scan->tx_cmd.sta_id = ctx->bcast_sta_id; 1240 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
@@ -1530,38 +1332,77 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1530 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 1332 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
1531 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 1333 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1532 scan->rx_chain = cpu_to_le16(rx_chain); 1334 scan->rx_chain = cpu_to_le16(rx_chain);
1533 if (!priv->is_internal_short_scan) { 1335 switch (priv->scan_type) {
1336 case IWL_SCAN_NORMAL:
1534 cmd_len = iwl_fill_probe_req(priv, 1337 cmd_len = iwl_fill_probe_req(priv,
1535 (struct ieee80211_mgmt *)scan->data, 1338 (struct ieee80211_mgmt *)scan->data,
1536 vif->addr, 1339 vif->addr,
1537 priv->scan_request->ie, 1340 priv->scan_request->ie,
1538 priv->scan_request->ie_len, 1341 priv->scan_request->ie_len,
1539 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 1342 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1540 } else { 1343 break;
1344 case IWL_SCAN_RADIO_RESET:
1541 /* use bcast addr, will not be transmitted but must be valid */ 1345 /* use bcast addr, will not be transmitted but must be valid */
1542 cmd_len = iwl_fill_probe_req(priv, 1346 cmd_len = iwl_fill_probe_req(priv,
1543 (struct ieee80211_mgmt *)scan->data, 1347 (struct ieee80211_mgmt *)scan->data,
1544 iwl_bcast_addr, NULL, 0, 1348 iwl_bcast_addr, NULL, 0,
1545 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 1349 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1546 1350 break;
1351 case IWL_SCAN_OFFCH_TX:
1352 cmd_len = iwl_fill_offch_tx(priv, scan->data,
1353 IWL_MAX_SCAN_SIZE
1354 - sizeof(*scan)
1355 - sizeof(struct iwl_scan_channel));
1356 scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
1357 break;
1358 default:
1359 BUG();
1547 } 1360 }
1548 scan->tx_cmd.len = cpu_to_le16(cmd_len); 1361 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1549 1362
1550 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | 1363 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1551 RXON_FILTER_BCON_AWARE_MSK); 1364 RXON_FILTER_BCON_AWARE_MSK);
1552 1365
1553 if (priv->is_internal_short_scan) { 1366 switch (priv->scan_type) {
1367 case IWL_SCAN_RADIO_RESET:
1554 scan->channel_count = 1368 scan->channel_count =
1555 iwl_get_single_channel_for_scan(priv, vif, band, 1369 iwl_get_single_channel_for_scan(priv, vif, band,
1556 (void *)&scan->data[le16_to_cpu( 1370 (void *)&scan->data[cmd_len]);
1557 scan->tx_cmd.len)]); 1371 break;
1558 } else { 1372 case IWL_SCAN_NORMAL:
1559 scan->channel_count = 1373 scan->channel_count =
1560 iwl_get_channels_for_scan(priv, vif, band, 1374 iwl_get_channels_for_scan(priv, vif, band,
1561 is_active, n_probes, 1375 is_active, n_probes,
1562 (void *)&scan->data[le16_to_cpu( 1376 (void *)&scan->data[cmd_len]);
1563 scan->tx_cmd.len)]); 1377 break;
1378 case IWL_SCAN_OFFCH_TX: {
1379 struct iwl_scan_channel *scan_ch;
1380
1381 scan->channel_count = 1;
1382
1383 scan_ch = (void *)&scan->data[cmd_len];
1384 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1385 scan_ch->channel =
1386 cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
1387 scan_ch->active_dwell =
1388 cpu_to_le16(priv->_agn.offchan_tx_timeout);
1389 scan_ch->passive_dwell = 0;
1390
1391 /* Set txpower levels to defaults */
1392 scan_ch->dsp_atten = 110;
1393
1394 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1395 * power level:
1396 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
1397 */
1398 if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
1399 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1400 else
1401 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1402 }
1403 break;
1564 } 1404 }
1405
1565 if (scan->channel_count == 0) { 1406 if (scan->channel_count == 0) {
1566 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 1407 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1567 return -EIO; 1408 return -EIO;
@@ -1801,26 +1642,39 @@ static const __le32 iwlagn_concurrent_lookup[12] = {
1801 1642
1802void iwlagn_send_advance_bt_config(struct iwl_priv *priv) 1643void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1803{ 1644{
1804 struct iwlagn_bt_cmd bt_cmd = { 1645 struct iwl_basic_bt_cmd basic = {
1805 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT, 1646 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
1806 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT, 1647 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
1807 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT, 1648 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
1808 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT, 1649 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
1809 }; 1650 };
1651 struct iwl6000_bt_cmd bt_cmd_6000;
1652 struct iwl2000_bt_cmd bt_cmd_2000;
1653 int ret;
1810 1654
1811 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != 1655 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
1812 sizeof(bt_cmd.bt3_lookup_table)); 1656 sizeof(basic.bt3_lookup_table));
1813 1657
1814 if (priv->cfg->bt_params) 1658 if (priv->cfg->bt_params) {
1815 bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost; 1659 if (priv->cfg->bt_params->bt_session_2) {
1816 else 1660 bt_cmd_2000.prio_boost = cpu_to_le32(
1817 bt_cmd.prio_boost = 0; 1661 priv->cfg->bt_params->bt_prio_boost);
1818 bt_cmd.kill_ack_mask = priv->kill_ack_mask; 1662 bt_cmd_2000.tx_prio_boost = 0;
1819 bt_cmd.kill_cts_mask = priv->kill_cts_mask; 1663 bt_cmd_2000.rx_prio_boost = 0;
1664 } else {
1665 bt_cmd_6000.prio_boost =
1666 priv->cfg->bt_params->bt_prio_boost;
1667 bt_cmd_6000.tx_prio_boost = 0;
1668 bt_cmd_6000.rx_prio_boost = 0;
1669 }
1670 } else {
1671 IWL_ERR(priv, "failed to construct BT Coex Config\n");
1672 return;
1673 }
1820 1674
1821 bt_cmd.valid = priv->bt_valid; 1675 basic.kill_ack_mask = priv->kill_ack_mask;
1822 bt_cmd.tx_prio_boost = 0; 1676 basic.kill_cts_mask = priv->kill_cts_mask;
1823 bt_cmd.rx_prio_boost = 0; 1677 basic.valid = priv->bt_valid;
1824 1678
1825 /* 1679 /*
1826 * Configure BT coex mode to "no coexistence" when the 1680 * Configure BT coex mode to "no coexistence" when the
@@ -1829,49 +1683,45 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1829 * IBSS mode (no proper uCode support for coex then). 1683 * IBSS mode (no proper uCode support for coex then).
1830 */ 1684 */
1831 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) { 1685 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1832 bt_cmd.flags = 0; 1686 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
1833 } else { 1687 } else {
1834 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << 1688 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1835 IWLAGN_BT_FLAG_COEX_MODE_SHIFT; 1689 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1836 if (priv->cfg->bt_params && 1690 if (priv->cfg->bt_params &&
1837 priv->cfg->bt_params->bt_sco_disable) 1691 priv->cfg->bt_params->bt_sco_disable)
1838 bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; 1692 basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1839 1693
1840 if (priv->bt_ch_announce) 1694 if (priv->bt_ch_announce)
1841 bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; 1695 basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1842 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags); 1696 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
1843 } 1697 }
1844 priv->bt_enable_flag = bt_cmd.flags; 1698 priv->bt_enable_flag = basic.flags;
1845 if (priv->bt_full_concurrent) 1699 if (priv->bt_full_concurrent)
1846 memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup, 1700 memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
1847 sizeof(iwlagn_concurrent_lookup)); 1701 sizeof(iwlagn_concurrent_lookup));
1848 else 1702 else
1849 memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup, 1703 memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
1850 sizeof(iwlagn_def_3w_lookup)); 1704 sizeof(iwlagn_def_3w_lookup));
1851 1705
1852 IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n", 1706 IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
1853 bt_cmd.flags ? "active" : "disabled", 1707 basic.flags ? "active" : "disabled",
1854 priv->bt_full_concurrent ? 1708 priv->bt_full_concurrent ?
1855 "full concurrency" : "3-wire"); 1709 "full concurrency" : "3-wire");
1856 1710
1857 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd)) 1711 if (priv->cfg->bt_params->bt_session_2) {
1712 memcpy(&bt_cmd_2000.basic, &basic,
1713 sizeof(basic));
1714 ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1715 sizeof(bt_cmd_2000), &bt_cmd_2000);
1716 } else {
1717 memcpy(&bt_cmd_6000.basic, &basic,
1718 sizeof(basic));
1719 ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1720 sizeof(bt_cmd_6000), &bt_cmd_6000);
1721 }
1722 if (ret)
1858 IWL_ERR(priv, "failed to send BT Coex Config\n"); 1723 IWL_ERR(priv, "failed to send BT Coex Config\n");
1859 1724
1860 /*
1861 * When we are doing a restart, need to also reconfigure BT
1862 * SCO to the device. If not doing a restart, bt_sco_active
1863 * will always be false, so there's no need to have an extra
1864 * variable to check for it.
1865 */
1866 if (priv->bt_sco_active) {
1867 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
1868
1869 if (priv->bt_sco_active)
1870 sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
1871 if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
1872 sizeof(sco_cmd), &sco_cmd))
1873 IWL_ERR(priv, "failed to send BT SCO command\n");
1874 }
1875} 1725}
1876 1726
1877static void iwlagn_bt_traffic_change_work(struct work_struct *work) 1727static void iwlagn_bt_traffic_change_work(struct work_struct *work)
@@ -1881,6 +1731,11 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1881 struct iwl_rxon_context *ctx; 1731 struct iwl_rxon_context *ctx;
1882 int smps_request = -1; 1732 int smps_request = -1;
1883 1733
1734 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1735 /* bt coex disabled */
1736 return;
1737 }
1738
1884 /* 1739 /*
1885 * Note: bt_traffic_load can be overridden by scan complete and 1740 * Note: bt_traffic_load can be overridden by scan complete and
1886 * coex profile notifications. Ignore that since only bad consequence 1741 * coex profile notifications. Ignore that since only bad consequence
@@ -1991,12 +1846,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
1991 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> 1846 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
1992 BT_UART_MSG_FRAME6DISCOVERABLE_POS); 1847 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
1993 1848
1994 IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = " 1849 IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
1995 "0x%X, Connectable = 0x%X", 1850 "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
1996 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> 1851 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
1997 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, 1852 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
1998 (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >> 1853 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
1999 BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS, 1854 BT_UART_MSG_FRAME7PAGE_POS,
1855 (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
1856 BT_UART_MSG_FRAME7INQUIRY_POS,
2000 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >> 1857 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
2001 BT_UART_MSG_FRAME7CONNECTABLE_POS); 1858 BT_UART_MSG_FRAME7CONNECTABLE_POS);
2002} 1859}
@@ -2032,9 +1889,13 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2032 unsigned long flags; 1889 unsigned long flags;
2033 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1890 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2034 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif; 1891 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
2035 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
2036 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg; 1892 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
2037 1893
1894 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1895 /* bt coex disabled */
1896 return;
1897 }
1898
2038 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n"); 1899 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
2039 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status); 1900 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
2040 IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load); 1901 IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
@@ -2063,15 +1924,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2063 queue_work(priv->workqueue, 1924 queue_work(priv->workqueue,
2064 &priv->bt_traffic_change_work); 1925 &priv->bt_traffic_change_work);
2065 } 1926 }
2066 if (priv->bt_sco_active !=
2067 (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
2068 priv->bt_sco_active = uart_msg->frame3 &
2069 BT_UART_MSG_FRAME3SCOESCO_MSK;
2070 if (priv->bt_sco_active)
2071 sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
2072 iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
2073 sizeof(sco_cmd), &sco_cmd, NULL);
2074 }
2075 } 1927 }
2076 1928
2077 iwlagn_set_kill_msk(priv, uart_msg); 1929 iwlagn_set_kill_msk(priv, uart_msg);
@@ -2389,3 +2241,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2389 } 2241 }
2390 return 0; 2242 return 0;
2391} 2243}
2244
2245/* notification wait support */
2246void iwlagn_init_notification_wait(struct iwl_priv *priv,
2247 struct iwl_notification_wait *wait_entry,
2248 void (*fn)(struct iwl_priv *priv,
2249 struct iwl_rx_packet *pkt),
2250 u8 cmd)
2251{
2252 wait_entry->fn = fn;
2253 wait_entry->cmd = cmd;
2254 wait_entry->triggered = false;
2255
2256 spin_lock_bh(&priv->_agn.notif_wait_lock);
2257 list_add(&wait_entry->list, &priv->_agn.notif_waits);
2258 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2259}
2260
2261signed long iwlagn_wait_notification(struct iwl_priv *priv,
2262 struct iwl_notification_wait *wait_entry,
2263 unsigned long timeout)
2264{
2265 int ret;
2266
2267 ret = wait_event_timeout(priv->_agn.notif_waitq,
2268 &wait_entry->triggered,
2269 timeout);
2270
2271 spin_lock_bh(&priv->_agn.notif_wait_lock);
2272 list_del(&wait_entry->list);
2273 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2274
2275 return ret;
2276}
2277
2278void iwlagn_remove_notification(struct iwl_priv *priv,
2279 struct iwl_notification_wait *wait_entry)
2280{
2281 spin_lock_bh(&priv->_agn.notif_wait_lock);
2282 list_del(&wait_entry->list);
2283 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2284}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 75fcd30a7c13..d03b4734c892 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -179,31 +179,31 @@ static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
179}; 179};
180 180
181static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { 181static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
182 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ 182 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
183 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ 183 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
184 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ 184 {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
185 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ 185 {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
186}; 186};
187 187
188static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { 188static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
189 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ 189 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
190 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ 190 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
191 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ 191 {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
192 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ 192 {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
193}; 193};
194 194
195static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { 195static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
196 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ 196 {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
197 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ 197 {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
198 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ 198 {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
199 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/ 199 {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
200}; 200};
201 201
202static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { 202static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
203 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ 203 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
204 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ 204 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
205 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ 205 {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
206 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ 206 {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
207}; 207};
208 208
209static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { 209static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
@@ -2890,6 +2890,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2890 u8 ant_toggle_cnt = 0; 2890 u8 ant_toggle_cnt = 0;
2891 u8 use_ht_possible = 1; 2891 u8 use_ht_possible = 1;
2892 u8 valid_tx_ant = 0; 2892 u8 valid_tx_ant = 0;
2893 struct iwl_station_priv *sta_priv =
2894 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2893 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; 2895 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
2894 2896
2895 /* Override starting rate (index 0) if needed for debug purposes */ 2897 /* Override starting rate (index 0) if needed for debug purposes */
@@ -3008,7 +3010,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3008 repeat_rate--; 3010 repeat_rate--;
3009 } 3011 }
3010 3012
3011 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 3013 lq_cmd->agg_params.agg_frame_cnt_limit =
3014 sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3012 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 3015 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3013 3016
3014 lq_cmd->agg_params.agg_time_limit = 3017 lq_cmd->agg_params.agg_time_limit =
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 75e50d33ecb3..184828c72b31 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -213,6 +213,7 @@ enum {
213 IWL_CCK_BASIC_RATES_MASK) 213 IWL_CCK_BASIC_RATES_MASK)
214 214
215#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) 215#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
216#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
216 217
217#define IWL_INVALID_VALUE -1 218#define IWL_INVALID_VALUE -1
218 219
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 6d140bd53291..dfdbea6e8f99 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -52,10 +52,14 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
52 struct iwl_rxon_context *ctx, 52 struct iwl_rxon_context *ctx,
53 struct iwl_rxon_cmd *send) 53 struct iwl_rxon_cmd *send)
54{ 54{
55 struct iwl_notification_wait disable_wait;
55 __le32 old_filter = send->filter_flags; 56 __le32 old_filter = send->filter_flags;
56 u8 old_dev_type = send->dev_type; 57 u8 old_dev_type = send->dev_type;
57 int ret; 58 int ret;
58 59
60 iwlagn_init_notification_wait(priv, &disable_wait, NULL,
61 REPLY_WIPAN_DEACTIVATION_COMPLETE);
62
59 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 63 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
60 send->dev_type = RXON_DEV_TYPE_P2P; 64 send->dev_type = RXON_DEV_TYPE_P2P;
61 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send); 65 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
@@ -63,11 +67,18 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
63 send->filter_flags = old_filter; 67 send->filter_flags = old_filter;
64 send->dev_type = old_dev_type; 68 send->dev_type = old_dev_type;
65 69
66 if (ret) 70 if (ret) {
67 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); 71 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
68 72 iwlagn_remove_notification(priv, &disable_wait);
69 /* FIXME: WAIT FOR PAN DISABLE */ 73 } else {
70 msleep(300); 74 signed long wait_res;
75
76 wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ);
77 if (wait_res == 0) {
78 IWL_ERR(priv, "Timed out waiting for PAN disable\n");
79 ret = -EIO;
80 }
81 }
71 82
72 return ret; 83 return ret;
73} 84}
@@ -145,6 +156,23 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
145 /* always get timestamp with Rx frame */ 156 /* always get timestamp with Rx frame */
146 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 157 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
147 158
159 if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
160 struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
161
162 iwl_set_rxon_channel(priv, chan, ctx);
163 iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
164 ctx->staging.filter_flags |=
165 RXON_FILTER_ASSOC_MSK |
166 RXON_FILTER_PROMISC_MSK |
167 RXON_FILTER_CTL2HOST_MSK;
168 ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
169 new_assoc = true;
170
171 if (memcmp(&ctx->staging, &ctx->active,
172 sizeof(ctx->staging)) == 0)
173 return 0;
174 }
175
148 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || 176 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
149 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) 177 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
150 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 178 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -288,10 +316,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
288 * If we issue a new RXON command which required a tune then we must 316 * If we issue a new RXON command which required a tune then we must
289 * send a new TXPOWER command or we won't be able to Tx any frames. 317 * send a new TXPOWER command or we won't be able to Tx any frames.
290 * 318 *
291 * FIXME: which RXON requires a tune? Can we optimise this out in 319 * It's expected we set power here if channel is changing.
292 * some cases?
293 */ 320 */
294 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 321 ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
295 if (ret) { 322 if (ret) {
296 IWL_ERR(priv, "Error sending TX power (%d)\n", ret); 323 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
297 return ret; 324 return ret;
@@ -444,6 +471,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
444 struct iwl_rxon_context *tmp; 471 struct iwl_rxon_context *tmp;
445 struct ieee80211_sta *sta; 472 struct ieee80211_sta *sta;
446 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 473 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
474 struct ieee80211_sta_ht_cap *ht_cap;
447 bool need_multiple; 475 bool need_multiple;
448 476
449 lockdep_assert_held(&priv->mutex); 477 lockdep_assert_held(&priv->mutex);
@@ -452,23 +480,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
452 case NL80211_IFTYPE_STATION: 480 case NL80211_IFTYPE_STATION:
453 rcu_read_lock(); 481 rcu_read_lock();
454 sta = ieee80211_find_sta(vif, bss_conf->bssid); 482 sta = ieee80211_find_sta(vif, bss_conf->bssid);
455 if (sta) { 483 if (!sta) {
456 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
457 int maxstreams;
458
459 maxstreams = (ht_cap->mcs.tx_params &
460 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
461 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
462 maxstreams += 1;
463
464 need_multiple = true;
465
466 if ((ht_cap->mcs.rx_mask[1] == 0) &&
467 (ht_cap->mcs.rx_mask[2] == 0))
468 need_multiple = false;
469 if (maxstreams <= 1)
470 need_multiple = false;
471 } else {
472 /* 484 /*
473 * If at all, this can only happen through a race 485 * If at all, this can only happen through a race
474 * when the AP disconnects us while we're still 486 * when the AP disconnects us while we're still
@@ -476,7 +488,46 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
476 * will soon tell us about that. 488 * will soon tell us about that.
477 */ 489 */
478 need_multiple = false; 490 need_multiple = false;
491 rcu_read_unlock();
492 break;
493 }
494
495 ht_cap = &sta->ht_cap;
496
497 need_multiple = true;
498
499 /*
500 * If the peer advertises no support for receiving 2 and 3
501 * stream MCS rates, it can't be transmitting them either.
502 */
503 if (ht_cap->mcs.rx_mask[1] == 0 &&
504 ht_cap->mcs.rx_mask[2] == 0) {
505 need_multiple = false;
506 } else if (!(ht_cap->mcs.tx_params &
507 IEEE80211_HT_MCS_TX_DEFINED)) {
508 /* If it can't TX MCS at all ... */
509 need_multiple = false;
510 } else if (ht_cap->mcs.tx_params &
511 IEEE80211_HT_MCS_TX_RX_DIFF) {
512 int maxstreams;
513
514 /*
515 * But if it can receive them, it might still not
516 * be able to transmit them, which is what we need
517 * to check here -- so check the number of streams
518 * it advertises for TX (if different from RX).
519 */
520
521 maxstreams = (ht_cap->mcs.tx_params &
522 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
523 maxstreams >>=
524 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
525 maxstreams += 1;
526
527 if (maxstreams <= 1)
528 need_multiple = false;
479 } 529 }
530
480 rcu_read_unlock(); 531 rcu_read_unlock();
481 break; 532 break;
482 case NL80211_IFTYPE_ADHOC: 533 case NL80211_IFTYPE_ADHOC:
@@ -546,12 +597,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
546 597
547 if (changes & BSS_CHANGED_ASSOC) { 598 if (changes & BSS_CHANGED_ASSOC) {
548 if (bss_conf->assoc) { 599 if (bss_conf->assoc) {
549 iwl_led_associate(priv);
550 priv->timestamp = bss_conf->timestamp; 600 priv->timestamp = bss_conf->timestamp;
551 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 601 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
552 } else { 602 } else {
553 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 603 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
554 iwl_led_disassociate(priv);
555 } 604 }
556 } 605 }
557 606
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 24a11b8f73bc..a709d05c5868 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -539,7 +539,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
539 unsigned long flags; 539 unsigned long flags;
540 bool is_agg = false; 540 bool is_agg = false;
541 541
542 if (info->control.vif) 542 /*
543 * If the frame needs to go out off-channel, then
544 * we'll have put the PAN context to that channel,
545 * so make the frame go out there.
546 */
547 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
548 ctx = &priv->contexts[IWL_RXON_CTX_PAN];
549 else if (info->control.vif)
543 ctx = iwl_rxon_ctx_from_vif(info->control.vif); 550 ctx = iwl_rxon_ctx_from_vif(info->control.vif);
544 551
545 spin_lock_irqsave(&priv->lock, flags); 552 spin_lock_irqsave(&priv->lock, flags);
@@ -940,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
940 */ 947 */
941void iwlagn_txq_ctx_stop(struct iwl_priv *priv) 948void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
942{ 949{
943 int ch; 950 int ch, txq_id;
944 unsigned long flags; 951 unsigned long flags;
945 952
946 /* Turn off all Tx DMA fifos */ 953 /* Turn off all Tx DMA fifos */
@@ -959,6 +966,16 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
959 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); 966 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
960 } 967 }
961 spin_unlock_irqrestore(&priv->lock, flags); 968 spin_unlock_irqrestore(&priv->lock, flags);
969
970 if (!priv->txq)
971 return;
972
973 /* Unmap DMA from host system and free skb's */
974 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
975 if (txq_id == priv->cmd_queue)
976 iwl_cmd_queue_unmap(priv);
977 else
978 iwl_tx_queue_unmap(priv, txq_id);
962} 979}
963 980
964/* 981/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 24dabcd2a36c..d807e5e2b718 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -308,14 +308,6 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
308{ 308{
309 int ret = 0; 309 int ret = 0;
310 310
311 /* Check alive response for "valid" sign from uCode */
312 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
313 /* We had an error bringing up the hardware, so take it
314 * all the way back down so we can try again */
315 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
316 goto restart;
317 }
318
319 /* initialize uCode was loaded... verify inst image. 311 /* initialize uCode was loaded... verify inst image.
320 * This is a paranoid check, because we would not have gotten the 312 * This is a paranoid check, because we would not have gotten the
321 * "initialize" alive if code weren't properly loaded. */ 313 * "initialize" alive if code weren't properly loaded. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c1cfd9952e52..581dc9f10273 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -59,6 +59,7 @@
59#include "iwl-sta.h" 59#include "iwl-sta.h"
60#include "iwl-agn-calib.h" 60#include "iwl-agn-calib.h"
61#include "iwl-agn.h" 61#include "iwl-agn.h"
62#include "iwl-agn-led.h"
62 63
63 64
64/****************************************************************************** 65/******************************************************************************
@@ -85,7 +86,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
85MODULE_VERSION(DRV_VERSION); 86MODULE_VERSION(DRV_VERSION);
86MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
87MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
88MODULE_ALIAS("iwl4965");
89 89
90static int iwlagn_ant_coupling; 90static int iwlagn_ant_coupling;
91static bool iwlagn_bt_ch_announce = 1; 91static bool iwlagn_bt_ch_announce = 1;
@@ -424,47 +424,6 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
424 return 0; 424 return 0;
425} 425}
426 426
427/******************************************************************************
428 *
429 * Generic RX handler implementations
430 *
431 ******************************************************************************/
432static void iwl_rx_reply_alive(struct iwl_priv *priv,
433 struct iwl_rx_mem_buffer *rxb)
434{
435 struct iwl_rx_packet *pkt = rxb_addr(rxb);
436 struct iwl_alive_resp *palive;
437 struct delayed_work *pwork;
438
439 palive = &pkt->u.alive_frame;
440
441 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
442 "0x%01X 0x%01X\n",
443 palive->is_valid, palive->ver_type,
444 palive->ver_subtype);
445
446 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
447 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
448 memcpy(&priv->card_alive_init,
449 &pkt->u.alive_frame,
450 sizeof(struct iwl_init_alive_resp));
451 pwork = &priv->init_alive_start;
452 } else {
453 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
454 memcpy(&priv->card_alive, &pkt->u.alive_frame,
455 sizeof(struct iwl_alive_resp));
456 pwork = &priv->alive_start;
457 }
458
459 /* We delay the ALIVE response by 5ms to
460 * give the HW RF Kill time to activate... */
461 if (palive->is_valid == UCODE_VALID_OK)
462 queue_delayed_work(priv->workqueue, pwork,
463 msecs_to_jiffies(5));
464 else
465 IWL_WARN(priv, "uCode did not respond OK.\n");
466}
467
468static void iwl_bg_beacon_update(struct work_struct *work) 427static void iwl_bg_beacon_update(struct work_struct *work)
469{ 428{
470 struct iwl_priv *priv = 429 struct iwl_priv *priv =
@@ -699,83 +658,6 @@ static void iwl_bg_ucode_trace(unsigned long data)
699 } 658 }
700} 659}
701 660
702static void iwl_rx_beacon_notif(struct iwl_priv *priv,
703 struct iwl_rx_mem_buffer *rxb)
704{
705 struct iwl_rx_packet *pkt = rxb_addr(rxb);
706 struct iwl4965_beacon_notif *beacon =
707 (struct iwl4965_beacon_notif *)pkt->u.raw;
708#ifdef CONFIG_IWLWIFI_DEBUG
709 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
710
711 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
712 "tsf %d %d rate %d\n",
713 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
714 beacon->beacon_notify_hdr.failure_frame,
715 le32_to_cpu(beacon->ibss_mgr_status),
716 le32_to_cpu(beacon->high_tsf),
717 le32_to_cpu(beacon->low_tsf), rate);
718#endif
719
720 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
721
722 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
723 queue_work(priv->workqueue, &priv->beacon_update);
724}
725
726/* Handle notification from uCode that card's power state is changing
727 * due to software, hardware, or critical temperature RFKILL */
728static void iwl_rx_card_state_notif(struct iwl_priv *priv,
729 struct iwl_rx_mem_buffer *rxb)
730{
731 struct iwl_rx_packet *pkt = rxb_addr(rxb);
732 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
733 unsigned long status = priv->status;
734
735 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
736 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
737 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
738 (flags & CT_CARD_DISABLED) ?
739 "Reached" : "Not reached");
740
741 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
742 CT_CARD_DISABLED)) {
743
744 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
745 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
746
747 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
748 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
749
750 if (!(flags & RXON_CARD_DISABLED)) {
751 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
752 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
753 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
754 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
755 }
756 if (flags & CT_CARD_DISABLED)
757 iwl_tt_enter_ct_kill(priv);
758 }
759 if (!(flags & CT_CARD_DISABLED))
760 iwl_tt_exit_ct_kill(priv);
761
762 if (flags & HW_CARD_DISABLED)
763 set_bit(STATUS_RF_KILL_HW, &priv->status);
764 else
765 clear_bit(STATUS_RF_KILL_HW, &priv->status);
766
767
768 if (!(flags & RXON_CARD_DISABLED))
769 iwl_scan_cancel(priv);
770
771 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
772 test_bit(STATUS_RF_KILL_HW, &priv->status)))
773 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
774 test_bit(STATUS_RF_KILL_HW, &priv->status));
775 else
776 wake_up_interruptible(&priv->wait_command_queue);
777}
778
779static void iwl_bg_tx_flush(struct work_struct *work) 661static void iwl_bg_tx_flush(struct work_struct *work)
780{ 662{
781 struct iwl_priv *priv = 663 struct iwl_priv *priv =
@@ -795,58 +677,13 @@ static void iwl_bg_tx_flush(struct work_struct *work)
795} 677}
796 678
797/** 679/**
798 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
799 *
800 * Setup the RX handlers for each of the reply types sent from the uCode
801 * to the host.
802 *
803 * This function chains into the hardware specific files for them to setup
804 * any hardware specific handlers as well.
805 */
806static void iwl_setup_rx_handlers(struct iwl_priv *priv)
807{
808 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
809 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
810 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
811 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
812 iwl_rx_spectrum_measure_notif;
813 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
814 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
815 iwl_rx_pm_debug_statistics_notif;
816 priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
817
818 /*
819 * The same handler is used for both the REPLY to a discrete
820 * statistics request from the host as well as for the periodic
821 * statistics notifications (after received beacons) from the uCode.
822 */
823 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
824 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
825
826 iwl_setup_rx_scan_handlers(priv);
827
828 /* status change handler */
829 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
830
831 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
832 iwl_rx_missed_beacon_notif;
833 /* Rx handlers */
834 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
835 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
836 /* block ack */
837 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
838 /* Set up hardware specific Rx handlers */
839 priv->cfg->ops->lib->rx_handler_setup(priv);
840}
841
842/**
843 * iwl_rx_handle - Main entry function for receiving responses from uCode 680 * iwl_rx_handle - Main entry function for receiving responses from uCode
844 * 681 *
845 * Uses the priv->rx_handlers callback function array to invoke 682 * Uses the priv->rx_handlers callback function array to invoke
846 * the appropriate handlers, including command responses, 683 * the appropriate handlers, including command responses,
847 * frame-received notifications, and other notifications. 684 * frame-received notifications, and other notifications.
848 */ 685 */
849void iwl_rx_handle(struct iwl_priv *priv) 686static void iwl_rx_handle(struct iwl_priv *priv)
850{ 687{
851 struct iwl_rx_mem_buffer *rxb; 688 struct iwl_rx_mem_buffer *rxb;
852 struct iwl_rx_packet *pkt; 689 struct iwl_rx_packet *pkt;
@@ -910,6 +747,27 @@ void iwl_rx_handle(struct iwl_priv *priv)
910 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && 747 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
911 (pkt->hdr.cmd != REPLY_TX); 748 (pkt->hdr.cmd != REPLY_TX);
912 749
750 /*
751 * Do the notification wait before RX handlers so
752 * even if the RX handler consumes the RXB we have
753 * access to it in the notification wait entry.
754 */
755 if (!list_empty(&priv->_agn.notif_waits)) {
756 struct iwl_notification_wait *w;
757
758 spin_lock(&priv->_agn.notif_wait_lock);
759 list_for_each_entry(w, &priv->_agn.notif_waits, list) {
760 if (w->cmd == pkt->hdr.cmd) {
761 w->triggered = true;
762 if (w->fn)
763 w->fn(priv, pkt);
764 }
765 }
766 spin_unlock(&priv->_agn.notif_wait_lock);
767
768 wake_up_all(&priv->_agn.notif_waitq);
769 }
770
913 /* Based on type of command response or notification, 771 /* Based on type of command response or notification,
914 * handle those that need handling via function in 772 * handle those that need handling via function in
915 * rx_handlers table. See iwl_setup_rx_handlers() */ 773 * rx_handlers table. See iwl_setup_rx_handlers() */
@@ -1379,66 +1237,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1379 iwl_enable_rfkill_int(priv); 1237 iwl_enable_rfkill_int(priv);
1380} 1238}
1381 1239
1382/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
1383#define ACK_CNT_RATIO (50)
1384#define BA_TIMEOUT_CNT (5)
1385#define BA_TIMEOUT_MAX (16)
1386
1387/**
1388 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
1389 *
1390 * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
1391 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
1392 * operation state.
1393 */
1394bool iwl_good_ack_health(struct iwl_priv *priv,
1395 struct iwl_rx_packet *pkt)
1396{
1397 bool rc = true;
1398 int actual_ack_cnt_delta, expected_ack_cnt_delta;
1399 int ba_timeout_delta;
1400
1401 actual_ack_cnt_delta =
1402 le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
1403 le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
1404 expected_ack_cnt_delta =
1405 le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
1406 le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
1407 ba_timeout_delta =
1408 le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
1409 le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
1410 if ((priv->_agn.agg_tids_count > 0) &&
1411 (expected_ack_cnt_delta > 0) &&
1412 (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
1413 < ACK_CNT_RATIO) &&
1414 (ba_timeout_delta > BA_TIMEOUT_CNT)) {
1415 IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
1416 " expected_ack_cnt = %d\n",
1417 actual_ack_cnt_delta, expected_ack_cnt_delta);
1418
1419#ifdef CONFIG_IWLWIFI_DEBUGFS
1420 /*
1421 * This is ifdef'ed on DEBUGFS because otherwise the
1422 * statistics aren't available. If DEBUGFS is set but
1423 * DEBUG is not, these will just compile out.
1424 */
1425 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
1426 priv->_agn.delta_statistics.tx.rx_detected_cnt);
1427 IWL_DEBUG_RADIO(priv,
1428 "ack_or_ba_timeout_collision delta = %d\n",
1429 priv->_agn.delta_statistics.tx.
1430 ack_or_ba_timeout_collision);
1431#endif
1432 IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
1433 ba_timeout_delta);
1434 if (!actual_ack_cnt_delta &&
1435 (ba_timeout_delta >= BA_TIMEOUT_MAX))
1436 rc = false;
1437 }
1438 return rc;
1439}
1440
1441
1442/***************************************************************************** 1240/*****************************************************************************
1443 * 1241 *
1444 * sysfs attributes 1242 * sysfs attributes
@@ -2632,13 +2430,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2632 2430
2633 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 2431 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2634 2432
2635 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2636 /* We had an error bringing up the hardware, so take it
2637 * all the way back down so we can try again */
2638 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2639 goto restart;
2640 }
2641
2642 /* Initialize uCode has loaded Runtime uCode ... verify inst image. 2433 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2643 * This is a paranoid check, because we would not have gotten the 2434 * This is a paranoid check, because we would not have gotten the
2644 * "runtime" alive if code weren't properly loaded. */ 2435 * "runtime" alive if code weren't properly loaded. */
@@ -2710,9 +2501,11 @@ static void iwl_alive_start(struct iwl_priv *priv)
2710 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 2501 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2711 } 2502 }
2712 2503
2713 if (priv->cfg->bt_params && 2504 if (!priv->cfg->bt_params || (priv->cfg->bt_params &&
2714 !priv->cfg->bt_params->advanced_bt_coexist) { 2505 !priv->cfg->bt_params->advanced_bt_coexist)) {
2715 /* Configure Bluetooth device coexistence support */ 2506 /*
2507 * default is 2-wire BT coexexistence support
2508 */
2716 priv->cfg->ops->hcmd->send_bt_config(priv); 2509 priv->cfg->ops->hcmd->send_bt_config(priv);
2717 } 2510 }
2718 2511
@@ -2726,8 +2519,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2726 /* At this point, the NIC is initialized and operational */ 2519 /* At this point, the NIC is initialized and operational */
2727 iwl_rf_kill_ct_config(priv); 2520 iwl_rf_kill_ct_config(priv);
2728 2521
2729 iwl_leds_init(priv);
2730
2731 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2522 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2732 wake_up_interruptible(&priv->wait_command_queue); 2523 wake_up_interruptible(&priv->wait_command_queue);
2733 2524
@@ -2769,7 +2560,6 @@ static void __iwl_down(struct iwl_priv *priv)
2769 priv->cfg->bt_params->bt_init_traffic_load; 2560 priv->cfg->bt_params->bt_init_traffic_load;
2770 else 2561 else
2771 priv->bt_traffic_load = 0; 2562 priv->bt_traffic_load = 0;
2772 priv->bt_sco_active = false;
2773 priv->bt_full_concurrent = false; 2563 priv->bt_full_concurrent = false;
2774 priv->bt_ci_compliance = 0; 2564 priv->bt_ci_compliance = 0;
2775 2565
@@ -3063,8 +2853,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
3063 } 2853 }
3064 2854
3065 if (priv->start_calib) { 2855 if (priv->start_calib) {
3066 if (priv->cfg->bt_params && 2856 if (iwl_bt_statistics(priv)) {
3067 priv->cfg->bt_params->bt_statistics) {
3068 iwl_chain_noise_calibration(priv, 2857 iwl_chain_noise_calibration(priv,
3069 (void *)&priv->_agn.statistics_bt); 2858 (void *)&priv->_agn.statistics_bt);
3070 iwl_sensitivity_calibration(priv, 2859 iwl_sensitivity_calibration(priv,
@@ -3089,7 +2878,7 @@ static void iwl_bg_restart(struct work_struct *data)
3089 2878
3090 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { 2879 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
3091 struct iwl_rxon_context *ctx; 2880 struct iwl_rxon_context *ctx;
3092 bool bt_sco, bt_full_concurrent; 2881 bool bt_full_concurrent;
3093 u8 bt_ci_compliance; 2882 u8 bt_ci_compliance;
3094 u8 bt_load; 2883 u8 bt_load;
3095 u8 bt_status; 2884 u8 bt_status;
@@ -3108,7 +2897,6 @@ static void iwl_bg_restart(struct work_struct *data)
3108 * re-configure the hw when we reconfigure the BT 2897 * re-configure the hw when we reconfigure the BT
3109 * command. 2898 * command.
3110 */ 2899 */
3111 bt_sco = priv->bt_sco_active;
3112 bt_full_concurrent = priv->bt_full_concurrent; 2900 bt_full_concurrent = priv->bt_full_concurrent;
3113 bt_ci_compliance = priv->bt_ci_compliance; 2901 bt_ci_compliance = priv->bt_ci_compliance;
3114 bt_load = priv->bt_traffic_load; 2902 bt_load = priv->bt_traffic_load;
@@ -3116,7 +2904,6 @@ static void iwl_bg_restart(struct work_struct *data)
3116 2904
3117 __iwl_down(priv); 2905 __iwl_down(priv);
3118 2906
3119 priv->bt_sco_active = bt_sco;
3120 priv->bt_full_concurrent = bt_full_concurrent; 2907 priv->bt_full_concurrent = bt_full_concurrent;
3121 priv->bt_ci_compliance = bt_ci_compliance; 2908 priv->bt_ci_compliance = bt_ci_compliance;
3122 priv->bt_traffic_load = bt_load; 2909 priv->bt_traffic_load = bt_load;
@@ -3150,6 +2937,91 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
3150 mutex_unlock(&priv->mutex); 2937 mutex_unlock(&priv->mutex);
3151} 2938}
3152 2939
/*
 * mac80211 offchannel_tx callback: transmit @skb on @chan using the PAN
 * context's scan-based offload.  On any negative error the skb is consumed
 * (freed) here; a return of 1 hands the frame back for mac80211's normal
 * remain-on-channel + TX path — presumably per the offchannel_tx contract,
 * TODO confirm against the mac80211 ieee80211_ops documentation.
 */
2940static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
2941				 struct ieee80211_channel *chan,
2942				 enum nl80211_channel_type channel_type,
2943				 unsigned int wait)
2944{
2945	struct iwl_priv *priv = hw->priv;
2946	int ret;
2947
2948	/* Not supported if we don't have PAN */
2949	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
2950		ret = -EOPNOTSUPP;
2951		goto free;
2952	}
2953
2954	/* Not supported on pre-P2P firmware */
2955	if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
2956					BIT(NL80211_IFTYPE_P2P_CLIENT))) {
2957		ret = -EOPNOTSUPP;
2958		goto free;
2959	}
2960
2961	mutex_lock(&priv->mutex);
2962
2963	if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
2964		/*
2965		 * If the PAN context is free, use the normal
2966		 * way of doing remain-on-channel offload + TX.
2967		 */
2968		ret = 1;
2969		goto out;
2970	}
2971
	/* Only one pending offchannel frame at a time; busy while scanning. */
2972	/* TODO: queue up if scanning? */
2973	if (test_bit(STATUS_SCANNING, &priv->status) ||
2974	    priv->_agn.offchan_tx_skb) {
2975		ret = -EBUSY;
2976		goto out;
2977	}
2978
2979	/*
2980	 * max_scan_ie_len doesn't include the blank SSID or the header,
2981	 * so need to add that again here.
2982	 */
2983	if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
2984		ret = -ENOBUFS;
2985		goto out;
2986	}
2987
	/* Stash the frame; the scan machinery sends it via IWL_SCAN_OFFCH_TX. */
2988	priv->_agn.offchan_tx_skb = skb;
2989	priv->_agn.offchan_tx_timeout = wait;
2990	priv->_agn.offchan_tx_chan = chan;
2991
2992	ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
2993				IWL_SCAN_OFFCH_TX, chan->band);
2994	if (ret)
2995		priv->_agn.offchan_tx_skb = NULL;
2996 out:
2997	mutex_unlock(&priv->mutex);
2998 free:
	/* Error paths own the skb; success (0 or 1) leaves it to the caller/scan. */
2999	if (ret < 0)
3000		kfree_skb(skb);
3001
3002	return ret;
3003}
3004
3005static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
3006{
3007 struct iwl_priv *priv = hw->priv;
3008 int ret;
3009
3010 mutex_lock(&priv->mutex);
3011
3012 if (!priv->_agn.offchan_tx_skb)
3013 return -EINVAL;
3014
3015 priv->_agn.offchan_tx_skb = NULL;
3016
3017 ret = iwl_scan_cancel_timeout(priv, 200);
3018 if (ret)
3019 ret = -EIO;
3020 mutex_unlock(&priv->mutex);
3021
3022 return ret;
3023}
3024
3153/***************************************************************************** 3025/*****************************************************************************
3154 * 3026 *
3155 * mac80211 entry point functions 3027 * mac80211 entry point functions
@@ -3178,6 +3050,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3178 IEEE80211_HW_SPECTRUM_MGMT | 3050 IEEE80211_HW_SPECTRUM_MGMT |
3179 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 3051 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
3180 3052
3053 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3054
3181 if (!priv->cfg->base_params->broken_powersave) 3055 if (!priv->cfg->base_params->broken_powersave)
3182 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 3056 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3183 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3057 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
@@ -3194,8 +3068,11 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3194 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; 3068 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
3195 } 3069 }
3196 3070
3071 hw->wiphy->max_remain_on_channel_duration = 1000;
3072
3197 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3073 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3198 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3074 WIPHY_FLAG_DISABLE_BEACON_HINTS |
3075 WIPHY_FLAG_IBSS_RSN;
3199 3076
3200 /* 3077 /*
3201 * For now, disable PS by default because it affects 3078 * For now, disable PS by default because it affects
@@ -3219,6 +3096,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3219 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 3096 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3220 &priv->bands[IEEE80211_BAND_5GHZ]; 3097 &priv->bands[IEEE80211_BAND_5GHZ];
3221 3098
3099 iwl_leds_init(priv);
3100
3222 ret = ieee80211_register_hw(priv->hw); 3101 ret = ieee80211_register_hw(priv->hw);
3223 if (ret) { 3102 if (ret) {
3224 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 3103 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3263,7 +3142,7 @@ int iwlagn_mac_start(struct ieee80211_hw *hw)
3263 } 3142 }
3264 } 3143 }
3265 3144
3266 iwl_led_start(priv); 3145 iwlagn_led_enable(priv);
3267 3146
3268out: 3147out:
3269 priv->is_open = 1; 3148 priv->is_open = 1;
@@ -3294,7 +3173,7 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
3294 IWL_DEBUG_MAC80211(priv, "leave\n"); 3173 IWL_DEBUG_MAC80211(priv, "leave\n");
3295} 3174}
3296 3175
3297int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3176void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3298{ 3177{
3299 struct iwl_priv *priv = hw->priv; 3178 struct iwl_priv *priv = hw->priv;
3300 3179
@@ -3307,7 +3186,6 @@ int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3307 dev_kfree_skb_any(skb); 3186 dev_kfree_skb_any(skb);
3308 3187
3309 IWL_DEBUG_MACDUMP(priv, "leave\n"); 3188 IWL_DEBUG_MACDUMP(priv, "leave\n");
3310 return NETDEV_TX_OK;
3311} 3189}
3312 3190
3313void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 3191void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -3345,6 +3223,14 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3345 return -EOPNOTSUPP; 3223 return -EOPNOTSUPP;
3346 } 3224 }
3347 3225
3226 /*
3227 * To support IBSS RSN, don't program group keys in IBSS, the
3228 * hardware will then not attempt to decrypt the frames.
3229 */
3230 if (vif->type == NL80211_IFTYPE_ADHOC &&
3231 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
3232 return -EOPNOTSUPP;
3233
3348 sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta); 3234 sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
3349 if (sta_id == IWL_INVALID_STATION) 3235 if (sta_id == IWL_INVALID_STATION)
3350 return -EINVAL; 3236 return -EINVAL;
@@ -3399,10 +3285,12 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3399int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 3285int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3400 struct ieee80211_vif *vif, 3286 struct ieee80211_vif *vif,
3401 enum ieee80211_ampdu_mlme_action action, 3287 enum ieee80211_ampdu_mlme_action action,
3402 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 3288 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
3289 u8 buf_size)
3403{ 3290{
3404 struct iwl_priv *priv = hw->priv; 3291 struct iwl_priv *priv = hw->priv;
3405 int ret = -EINVAL; 3292 int ret = -EINVAL;
3293 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
3406 3294
3407 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 3295 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
3408 sta->addr, tid); 3296 sta->addr, tid);
@@ -3457,11 +3345,28 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3457 } 3345 }
3458 break; 3346 break;
3459 case IEEE80211_AMPDU_TX_OPERATIONAL: 3347 case IEEE80211_AMPDU_TX_OPERATIONAL:
3348 /*
3349 * If the limit is 0, then it wasn't initialised yet,
3350 * use the default. We can do that since we take the
3351 * minimum below, and we don't want to go above our
3352 * default due to hardware restrictions.
3353 */
3354 if (sta_priv->max_agg_bufsize == 0)
3355 sta_priv->max_agg_bufsize =
3356 LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3357
3358 /*
3359 * Even though in theory the peer could have different
3360 * aggregation reorder buffer sizes for different sessions,
3361 * our ucode doesn't allow for that and has a global limit
3362 * for each station. Therefore, use the minimum of all the
3363 * aggregation sessions and our default value.
3364 */
3365 sta_priv->max_agg_bufsize =
3366 min(sta_priv->max_agg_bufsize, buf_size);
3367
3460 if (priv->cfg->ht_params && 3368 if (priv->cfg->ht_params &&
3461 priv->cfg->ht_params->use_rts_for_aggregation) { 3369 priv->cfg->ht_params->use_rts_for_aggregation) {
3462 struct iwl_station_priv *sta_priv =
3463 (void *) sta->drv_priv;
3464
3465 /* 3370 /*
3466 * switch to RTS/CTS if it is the prefer protection 3371 * switch to RTS/CTS if it is the prefer protection
3467 * method for HT traffic 3372 * method for HT traffic
@@ -3469,9 +3374,13 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3469 3374
3470 sta_priv->lq_sta.lq.general_params.flags |= 3375 sta_priv->lq_sta.lq.general_params.flags |=
3471 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; 3376 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
3472 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
3473 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
3474 } 3377 }
3378
3379 sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
3380 sta_priv->max_agg_bufsize;
3381
3382 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
3383 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
3475 ret = 0; 3384 ret = 0;
3476 break; 3385 break;
3477 } 3386 }
@@ -3709,6 +3618,95 @@ done:
3709 IWL_DEBUG_MAC80211(priv, "leave\n"); 3618 IWL_DEBUG_MAC80211(priv, "leave\n");
3710} 3619}
3711 3620
/*
 * Tear down the PAN context used for hardware remain-on-channel: revert the
 * staging RXON to plain dual-station device type on the current operating
 * channel, clear the recorded ROC channel, and commit.  No-op if the PAN
 * context is not active.  Caller must hold priv->mutex (lockdep-asserted).
 */
3621static void iwlagn_disable_roc(struct iwl_priv *priv)
3622{
3623	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
	/* Snapshot the current channel once; conf.channel may change under us. */
3624	struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
3625
3626	lockdep_assert_held(&priv->mutex);
3627
3628	if (!ctx->is_active)
3629		return;
3630
	/* Restage the context back to its non-ROC configuration. */
3631	ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
3632	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3633	iwl_set_rxon_channel(priv, chan, ctx);
3634	iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
3635
3636	priv->_agn.hw_roc_channel = NULL;
3637
	/* Push the staged RXON to the device before marking the context free. */
3638	iwlcore_commit_rxon(priv, ctx);
3639
3640	ctx->is_active = false;
3641}
3642
/*
 * Delayed-work handler fired when the remain-on-channel duration expires:
 * notify mac80211 that the ROC period ended, then disable the PAN ROC
 * context.  Scheduled from iwl_mac_remain_on_channel().
 */
3643static void iwlagn_bg_roc_done(struct work_struct *work)
3644{
3645	struct iwl_priv *priv = container_of(work, struct iwl_priv,
3646					     _agn.hw_roc_work.work);
3647
	/* iwlagn_disable_roc() requires priv->mutex held. */
3648	mutex_lock(&priv->mutex);
3649	ieee80211_remain_on_channel_expired(priv->hw);
3650	iwlagn_disable_roc(priv);
3651	mutex_unlock(&priv->mutex);
3652}
3653
/*
 * mac80211 remain_on_channel callback: park the device on @channel for
 * @duration ms using the PAN context.  Requires a PAN context and P2P-capable
 * firmware; fails with -EBUSY if the PAN context is in use or a hardware scan
 * is running.  Expiry is handled by the hw_roc_work delayed work.
 */
3654static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3655				     struct ieee80211_channel *channel,
3656				     enum nl80211_channel_type channel_type,
3657				     int duration)
3658{
3659	struct iwl_priv *priv = hw->priv;
3660	int err = 0;
3661
	/* ROC is implemented on top of the PAN context — bail if absent. */
3662	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3663		return -EOPNOTSUPP;
3664
	/* Pre-P2P firmware cannot do this either. */
3665	if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
3666					BIT(NL80211_IFTYPE_P2P_CLIENT)))
3667		return -EOPNOTSUPP;
3668
3669	mutex_lock(&priv->mutex);
3670
3671	if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
3672	    test_bit(STATUS_SCAN_HW, &priv->status)) {
3673		err = -EBUSY;
3674		goto out;
3675	}
3676
	/*
	 * Claim the PAN context and record the ROC parameters.  Duration is
	 * converted from ms to TU (1 TU = 1024 us) for the firmware, rounding
	 * up via DIV_ROUND_UP.
	 */
3677	priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
3678	priv->_agn.hw_roc_channel = channel;
3679	priv->_agn.hw_roc_chantype = channel_type;
3680	priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
3681	iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
	/* Schedule expiry slightly (20ms) after the requested duration. */
3682	queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
3683			   msecs_to_jiffies(duration + 20));
3684
3685	msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
3686	ieee80211_ready_on_channel(priv->hw);
3687
3688 out:
3689	mutex_unlock(&priv->mutex);
3690
3691	return err;
3692}
3693
/*
 * mac80211 cancel_remain_on_channel callback: stop an in-progress ROC early.
 * Cancels the pending expiry work (synchronously, and outside priv->mutex
 * since the work handler takes that mutex itself), then tears down the PAN
 * ROC context.
 */
3694static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3695{
3696	struct iwl_priv *priv = hw->priv;
3697
3698	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3699		return -EOPNOTSUPP;
3700
3701	cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
3702
3703	mutex_lock(&priv->mutex);
3704	iwlagn_disable_roc(priv);
3705	mutex_unlock(&priv->mutex);
3706
3707	return 0;
3708}
3709
3712/***************************************************************************** 3710/*****************************************************************************
3713 * 3711 *
3714 * driver setup and teardown 3712 * driver setup and teardown
@@ -3730,6 +3728,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3730 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); 3728 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
3731 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); 3729 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
3732 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); 3730 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
3731 INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
3733 3732
3734 iwl_setup_scan_deferred_work(priv); 3733 iwl_setup_scan_deferred_work(priv);
3735 3734
@@ -3823,6 +3822,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
3823 priv->force_reset[IWL_FW_RESET].reset_duration = 3822 priv->force_reset[IWL_FW_RESET].reset_duration =
3824 IWL_DELAY_NEXT_FORCE_FW_RELOAD; 3823 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3825 3824
3825 priv->rx_statistics_jiffies = jiffies;
3826
3826 /* Choose which receivers/antennas to use */ 3827 /* Choose which receivers/antennas to use */
3827 if (priv->cfg->ops->hcmd->set_rxon_chain) 3828 if (priv->cfg->ops->hcmd->set_rxon_chain)
3828 priv->cfg->ops->hcmd->set_rxon_chain(priv, 3829 priv->cfg->ops->hcmd->set_rxon_chain(priv,
@@ -3876,7 +3877,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
3876 kfree(priv->scan_cmd); 3877 kfree(priv->scan_cmd);
3877} 3878}
3878 3879
3879#ifdef CONFIG_IWL5000
3880struct ieee80211_ops iwlagn_hw_ops = { 3880struct ieee80211_ops iwlagn_hw_ops = {
3881 .tx = iwlagn_mac_tx, 3881 .tx = iwlagn_mac_tx,
3882 .start = iwlagn_mac_start, 3882 .start = iwlagn_mac_start,
@@ -3898,14 +3898,17 @@ struct ieee80211_ops iwlagn_hw_ops = {
3898 .channel_switch = iwlagn_mac_channel_switch, 3898 .channel_switch = iwlagn_mac_channel_switch,
3899 .flush = iwlagn_mac_flush, 3899 .flush = iwlagn_mac_flush,
3900 .tx_last_beacon = iwl_mac_tx_last_beacon, 3900 .tx_last_beacon = iwl_mac_tx_last_beacon,
3901 .remain_on_channel = iwl_mac_remain_on_channel,
3902 .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
3903 .offchannel_tx = iwl_mac_offchannel_tx,
3904 .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
3901}; 3905};
3902#endif
3903 3906
3904static void iwl_hw_detect(struct iwl_priv *priv) 3907static void iwl_hw_detect(struct iwl_priv *priv)
3905{ 3908{
3906 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV); 3909 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
3907 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG); 3910 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
3908 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); 3911 priv->rev_id = priv->pci_dev->revision;
3909 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); 3912 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
3910} 3913}
3911 3914
@@ -3967,12 +3970,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3967 if (cfg->mod_params->disable_hw_scan) { 3970 if (cfg->mod_params->disable_hw_scan) {
3968 dev_printk(KERN_DEBUG, &(pdev->dev), 3971 dev_printk(KERN_DEBUG, &(pdev->dev),
3969 "sw scan support is deprecated\n"); 3972 "sw scan support is deprecated\n");
3970#ifdef CONFIG_IWL5000
3971 iwlagn_hw_ops.hw_scan = NULL; 3973 iwlagn_hw_ops.hw_scan = NULL;
3972#endif
3973#ifdef CONFIG_IWL4965
3974 iwl4965_hw_ops.hw_scan = NULL;
3975#endif
3976 } 3974 }
3977 3975
3978 hw = iwl_alloc_all(cfg); 3976 hw = iwl_alloc_all(cfg);
@@ -4025,6 +4023,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4025 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE; 4023 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
4026 priv->contexts[IWL_RXON_CTX_PAN].interface_modes = 4024 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
4027 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); 4025 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
4026#ifdef CONFIG_IWL_P2P
4027 priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
4028 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
4029#endif
4028 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; 4030 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
4029 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; 4031 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
4030 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; 4032 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -4272,6 +4274,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4272 * we need to set STATUS_EXIT_PENDING bit. 4274 * we need to set STATUS_EXIT_PENDING bit.
4273 */ 4275 */
4274 set_bit(STATUS_EXIT_PENDING, &priv->status); 4276 set_bit(STATUS_EXIT_PENDING, &priv->status);
4277
4278 iwl_leds_exit(priv);
4279
4275 if (priv->mac80211_registered) { 4280 if (priv->mac80211_registered) {
4276 ieee80211_unregister_hw(priv->hw); 4281 ieee80211_unregister_hw(priv->hw);
4277 priv->mac80211_registered = 0; 4282 priv->mac80211_registered = 0;
@@ -4344,12 +4349,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4344 4349
4345/* Hardware specific file defines the PCI IDs table for that hardware module */ 4350/* Hardware specific file defines the PCI IDs table for that hardware module */
4346static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { 4351static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4347#ifdef CONFIG_IWL4965
4348 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
4349 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
4350#endif /* CONFIG_IWL4965 */
4351#ifdef CONFIG_IWL5000
4352/* 5100 Series WiFi */
4353 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ 4352 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
4354 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ 4353 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
4355 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ 4354 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -4492,7 +4491,48 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4492 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, 4491 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
4493 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, 4492 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
4494 4493
4495#endif /* CONFIG_IWL5000 */ 4494/* 2x00 Series */
4495 {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
4496 {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
4497 {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
4498 {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
4499 {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
4500 {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
4501
4502/* 2x30 Series */
4503 {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
4504 {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
4505 {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
4506 {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
4507 {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
4508 {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
4509
4510/* 6x35 Series */
4511 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
4512 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
4513 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
4514 {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
4515 {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
4516 {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
4517 {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
4518 {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
4519 {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
4520
4521/* 200 Series */
4522 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)},
4523 {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)},
4524 {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)},
4525 {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)},
4526 {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)},
4527 {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)},
4528
4529/* 230 Series */
4530 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)},
4531 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)},
4532 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)},
4533 {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)},
4534 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
4535 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
4496 4536
4497 {0} 4537 {0}
4498}; 4538};
@@ -4592,3 +4632,9 @@ MODULE_PARM_DESC(antenna_coupling,
4592module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO); 4632module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
4593MODULE_PARM_DESC(bt_ch_inhibition, 4633MODULE_PARM_DESC(bt_ch_inhibition,
4594 "Disable BT channel inhibition (default: enable)"); 4634 "Disable BT channel inhibition (default: enable)");
4635
4636module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
4637MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
4638
4639module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
4640MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index da303585f801..20f8e4188994 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -96,6 +96,17 @@ extern struct iwl_cfg iwl100_bgn_cfg;
96extern struct iwl_cfg iwl100_bg_cfg; 96extern struct iwl_cfg iwl100_bg_cfg;
97extern struct iwl_cfg iwl130_bgn_cfg; 97extern struct iwl_cfg iwl130_bgn_cfg;
98extern struct iwl_cfg iwl130_bg_cfg; 98extern struct iwl_cfg iwl130_bg_cfg;
99extern struct iwl_cfg iwl2000_2bgn_cfg;
100extern struct iwl_cfg iwl2000_2bg_cfg;
101extern struct iwl_cfg iwl2030_2bgn_cfg;
102extern struct iwl_cfg iwl2030_2bg_cfg;
103extern struct iwl_cfg iwl6035_2agn_cfg;
104extern struct iwl_cfg iwl6035_2abg_cfg;
105extern struct iwl_cfg iwl6035_2bg_cfg;
106extern struct iwl_cfg iwl200_bg_cfg;
107extern struct iwl_cfg iwl200_bgn_cfg;
108extern struct iwl_cfg iwl230_bg_cfg;
109extern struct iwl_cfg iwl230_bgn_cfg;
99 110
100extern struct iwl_mod_params iwlagn_mod_params; 111extern struct iwl_mod_params iwlagn_mod_params;
101extern struct iwl_hcmd_ops iwlagn_hcmd; 112extern struct iwl_hcmd_ops iwlagn_hcmd;
@@ -110,8 +121,6 @@ void iwl_disable_ict(struct iwl_priv *priv);
110int iwl_alloc_isr_ict(struct iwl_priv *priv); 121int iwl_alloc_isr_ict(struct iwl_priv *priv);
111void iwl_free_isr_ict(struct iwl_priv *priv); 122void iwl_free_isr_ict(struct iwl_priv *priv);
112irqreturn_t iwl_isr_ict(int irq, void *data); 123irqreturn_t iwl_isr_ict(int irq, void *data);
113bool iwl_good_ack_health(struct iwl_priv *priv,
114 struct iwl_rx_packet *pkt);
115 124
116/* tx queue */ 125/* tx queue */
117void iwlagn_set_wr_ptrs(struct iwl_priv *priv, 126void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
@@ -181,11 +190,7 @@ void iwlagn_rx_replenish_now(struct iwl_priv *priv);
181void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 190void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
182int iwlagn_rxq_stop(struct iwl_priv *priv); 191int iwlagn_rxq_stop(struct iwl_priv *priv);
183int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); 192int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
184void iwlagn_rx_reply_rx(struct iwl_priv *priv, 193void iwl_setup_rx_handlers(struct iwl_priv *priv);
185 struct iwl_rx_mem_buffer *rxb);
186void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
187 struct iwl_rx_mem_buffer *rxb);
188void iwl_rx_handle(struct iwl_priv *priv);
189 194
190/* tx */ 195/* tx */
191void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 196void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
@@ -235,16 +240,6 @@ static inline bool iwl_is_tx_success(u32 status)
235 240
236u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); 241u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
237 242
238/* rx */
239void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
240 struct iwl_rx_mem_buffer *rxb);
241bool iwl_good_plcp_health(struct iwl_priv *priv,
242 struct iwl_rx_packet *pkt);
243void iwl_rx_statistics(struct iwl_priv *priv,
244 struct iwl_rx_mem_buffer *rxb);
245void iwl_reply_statistics(struct iwl_priv *priv,
246 struct iwl_rx_mem_buffer *rxb);
247
248/* scan */ 243/* scan */
249int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); 244int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
250void iwlagn_post_scan(struct iwl_priv *priv); 245void iwlagn_post_scan(struct iwl_priv *priv);
@@ -330,8 +325,23 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
330int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); 325int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
331void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); 326void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
332 327
328/* notification wait support */
329void __acquires(wait_entry)
330iwlagn_init_notification_wait(struct iwl_priv *priv,
331 struct iwl_notification_wait *wait_entry,
332 void (*fn)(struct iwl_priv *priv,
333 struct iwl_rx_packet *pkt),
334 u8 cmd);
335signed long __releases(wait_entry)
336iwlagn_wait_notification(struct iwl_priv *priv,
337 struct iwl_notification_wait *wait_entry,
338 unsigned long timeout);
339void __releases(wait_entry)
340iwlagn_remove_notification(struct iwl_priv *priv,
341 struct iwl_notification_wait *wait_entry);
342
333/* mac80211 handlers (for 4965) */ 343/* mac80211 handlers (for 4965) */
334int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 344void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
335int iwlagn_mac_start(struct ieee80211_hw *hw); 345int iwlagn_mac_start(struct ieee80211_hw *hw);
336void iwlagn_mac_stop(struct ieee80211_hw *hw); 346void iwlagn_mac_stop(struct ieee80211_hw *hw);
337void iwlagn_configure_filter(struct ieee80211_hw *hw, 347void iwlagn_configure_filter(struct ieee80211_hw *hw,
@@ -349,7 +359,8 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
349int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 359int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
350 struct ieee80211_vif *vif, 360 struct ieee80211_vif *vif,
351 enum ieee80211_ampdu_mlme_action action, 361 enum ieee80211_ampdu_mlme_action action,
352 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 362 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
363 u8 buf_size);
353int iwlagn_mac_sta_add(struct ieee80211_hw *hw, 364int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
354 struct ieee80211_vif *vif, 365 struct ieee80211_vif *vif,
355 struct ieee80211_sta *sta); 366 struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f893d4a6aa87..ca42ffa63ed7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -178,7 +178,6 @@ enum {
178 REPLY_BT_COEX_PRIO_TABLE = 0xcc, 178 REPLY_BT_COEX_PRIO_TABLE = 0xcc,
179 REPLY_BT_COEX_PROT_ENV = 0xcd, 179 REPLY_BT_COEX_PROT_ENV = 0xcd,
180 REPLY_BT_COEX_PROFILE_NOTIF = 0xce, 180 REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
181 REPLY_BT_COEX_SCO = 0xcf,
182 181
183 /* PAN commands */ 182 /* PAN commands */
184 REPLY_WIPAN_PARAMS = 0xb2, 183 REPLY_WIPAN_PARAMS = 0xb2,
@@ -189,6 +188,7 @@ enum {
189 REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */ 188 REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
190 REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9, 189 REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
191 REPLY_WIPAN_NOA_NOTIFICATION = 0xbc, 190 REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
191 REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
192 192
193 REPLY_MAX = 0xff 193 REPLY_MAX = 0xff
194}; 194};
@@ -2477,7 +2477,7 @@ struct iwl_bt_cmd {
2477 IWLAGN_BT_VALID_BT4_TIMES | \ 2477 IWLAGN_BT_VALID_BT4_TIMES | \
2478 IWLAGN_BT_VALID_3W_LUT) 2478 IWLAGN_BT_VALID_3W_LUT)
2479 2479
2480struct iwlagn_bt_cmd { 2480struct iwl_basic_bt_cmd {
2481 u8 flags; 2481 u8 flags;
2482 u8 ledtime; /* unused */ 2482 u8 ledtime; /* unused */
2483 u8 max_kill; 2483 u8 max_kill;
@@ -2490,6 +2490,10 @@ struct iwlagn_bt_cmd {
2490 __le32 bt3_lookup_table[12]; 2490 __le32 bt3_lookup_table[12];
2491 __le16 bt4_decision_time; /* unused */ 2491 __le16 bt4_decision_time; /* unused */
2492 __le16 valid; 2492 __le16 valid;
2493};
2494
2495struct iwl6000_bt_cmd {
2496 struct iwl_basic_bt_cmd basic;
2493 u8 prio_boost; 2497 u8 prio_boost;
2494 /* 2498 /*
2495 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask 2499 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
@@ -2499,6 +2503,18 @@ struct iwlagn_bt_cmd {
2499 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ 2503 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
2500}; 2504};
2501 2505
2506struct iwl2000_bt_cmd {
2507 struct iwl_basic_bt_cmd basic;
2508 __le32 prio_boost;
2509 /*
2510 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
2511 * if configure the following patterns
2512 */
2513 u8 reserved;
2514 u8 tx_prio_boost; /* SW boost of WiFi tx priority */
2515 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
2516};
2517
2502#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0)) 2518#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
2503 2519
2504struct iwlagn_bt_sco_cmd { 2520struct iwlagn_bt_sco_cmd {
@@ -2948,9 +2964,15 @@ struct iwl3945_scan_cmd {
2948 u8 data[0]; 2964 u8 data[0];
2949} __packed; 2965} __packed;
2950 2966
2967enum iwl_scan_flags {
2968 /* BIT(0) currently unused */
2969 IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1),
2970 /* bits 2-7 reserved */
2971};
2972
2951struct iwl_scan_cmd { 2973struct iwl_scan_cmd {
2952 __le16 len; 2974 __le16 len;
2953 u8 reserved0; 2975 u8 scan_flags; /* scan flags: see enum iwl_scan_flags */
2954 u8 channel_count; /* # channels in channel list */ 2976 u8 channel_count; /* # channels in channel list */
2955 __le16 quiet_time; /* dwell only this # millisecs on quiet channel 2977 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2956 * (only for active scan) */ 2978 * (only for active scan) */
@@ -3082,6 +3104,13 @@ struct iwl4965_beacon_notif {
3082 __le32 ibss_mgr_status; 3104 __le32 ibss_mgr_status;
3083} __packed; 3105} __packed;
3084 3106
3107struct iwlagn_beacon_notif {
3108 struct iwlagn_tx_resp beacon_notify_hdr;
3109 __le32 low_tsf;
3110 __le32 high_tsf;
3111 __le32 ibss_mgr_status;
3112} __packed;
3113
3085/* 3114/*
3086 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 3115 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
3087 */ 3116 */
@@ -4143,6 +4172,10 @@ enum iwl_bt_coex_profile_traffic_load {
4143 */ 4172 */
4144}; 4173};
4145 4174
4175#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1
4176#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2
4177
4178/* BT UART message - Share Part (BT -> WiFi) */
4146#define BT_UART_MSG_FRAME1MSGTYPE_POS (0) 4179#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
4147#define BT_UART_MSG_FRAME1MSGTYPE_MSK \ 4180#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
4148 (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS) 4181 (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
@@ -4227,9 +4260,12 @@ enum iwl_bt_coex_profile_traffic_load {
4227#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0) 4260#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
4228#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \ 4261#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
4229 (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS) 4262 (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
4230#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS (3) 4263#define BT_UART_MSG_FRAME7PAGE_POS (3)
4231#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK \ 4264#define BT_UART_MSG_FRAME7PAGE_MSK \
4232 (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS) 4265 (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
4266#define BT_UART_MSG_FRAME7INQUIRY_POS (4)
4267#define BT_UART_MSG_FRAME7INQUIRY_MSK \
4268 (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
4233#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5) 4269#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
4234#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \ 4270#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
4235 (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS) 4271 (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
@@ -4237,6 +4273,83 @@ enum iwl_bt_coex_profile_traffic_load {
4237#define BT_UART_MSG_FRAME7RESERVED_MSK \ 4273#define BT_UART_MSG_FRAME7RESERVED_MSK \
4238 (0x3 << BT_UART_MSG_FRAME7RESERVED_POS) 4274 (0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
4239 4275
4276/* BT Session Activity 2 UART message (BT -> WiFi) */
4277#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5)
4278#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \
4279 (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
4280#define BT_UART_MSG_2_FRAME1RESERVED2_POS (6)
4281#define BT_UART_MSG_2_FRAME1RESERVED2_MSK \
4282 (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
4283
4284#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0)
4285#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \
4286 (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
4287#define BT_UART_MSG_2_FRAME2RESERVED_POS (6)
4288#define BT_UART_MSG_2_FRAME2RESERVED_MSK \
4289 (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
4290
4291#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS (0)
4292#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK \
4293 (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
4294#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS (4)
4295#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK \
4296 (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
4297#define BT_UART_MSG_2_FRAME3LEMASTER_POS (5)
4298#define BT_UART_MSG_2_FRAME3LEMASTER_MSK \
4299 (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
4300#define BT_UART_MSG_2_FRAME3RESERVED_POS (6)
4301#define BT_UART_MSG_2_FRAME3RESERVED_MSK \
4302 (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
4303
4304#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS (0)
4305#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK \
4306 (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
4307#define BT_UART_MSG_2_FRAME4NUMLECONN_POS (4)
4308#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK \
4309 (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
4310#define BT_UART_MSG_2_FRAME4RESERVED_POS (6)
4311#define BT_UART_MSG_2_FRAME4RESERVED_MSK \
4312 (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
4313
4314#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS (0)
4315#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK \
4316 (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
4317#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4)
4318#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \
4319 (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
4320#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS (5)
4321#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK \
4322 (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
4323#define BT_UART_MSG_2_FRAME5RESERVED_POS (6)
4324#define BT_UART_MSG_2_FRAME5RESERVED_MSK \
4325 (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
4326
4327#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0)
4328#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \
4329 (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
4330#define BT_UART_MSG_2_FRAME6RFU_POS (5)
4331#define BT_UART_MSG_2_FRAME6RFU_MSK \
4332 (0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
4333#define BT_UART_MSG_2_FRAME6RESERVED_POS (6)
4334#define BT_UART_MSG_2_FRAME6RESERVED_MSK \
4335 (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
4336
4337#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0)
4338#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \
4339 (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
4340#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS (3)
4341#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK \
4342 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
4343#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS (4)
4344#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK \
4345 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
4346#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5)
4347#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \
4348 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
4349#define BT_UART_MSG_2_FRAME7RESERVED_POS (6)
4350#define BT_UART_MSG_2_FRAME7RESERVED_MSK \
4351 (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
4352
4240 4353
4241struct iwl_bt_uart_msg { 4354struct iwl_bt_uart_msg {
4242 u8 header; 4355 u8 header;
@@ -4369,6 +4482,11 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
4369 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification) 4482 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
4370 */ 4483 */
4371 4484
4485/*
4486 * Minimum slot time in TU
4487 */
4488#define IWL_MIN_SLOT_TIME 20
4489
4372/** 4490/**
4373 * struct iwl_wipan_slot 4491 * struct iwl_wipan_slot
4374 * @width: Time in TU 4492 * @width: Time in TU
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index efbde1f1a8bf..6c30fa652e27 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -43,11 +43,6 @@
43#include "iwl-helpers.h" 43#include "iwl-helpers.h"
44 44
45 45
46MODULE_DESCRIPTION("iwl core");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/* 46/*
52 * set bt_coex_active to true, uCode will do kill/defer 47 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the 48 * every time the priority line is asserted (BT is sending signals on the
@@ -65,15 +60,12 @@ MODULE_LICENSE("GPL");
65 * default: bt_coex_active = true (BT_COEX_ENABLE) 60 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */ 61 */
67bool bt_coex_active = true; 62bool bt_coex_active = true;
68EXPORT_SYMBOL_GPL(bt_coex_active);
69module_param(bt_coex_active, bool, S_IRUGO); 63module_param(bt_coex_active, bool, S_IRUGO);
70MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 64MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
71 65
72u32 iwl_debug_level; 66u32 iwl_debug_level;
73EXPORT_SYMBOL(iwl_debug_level);
74 67
75const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 68const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
76EXPORT_SYMBOL(iwl_bcast_addr);
77 69
78 70
79/* This function both allocates and initializes hw and priv. */ 71/* This function both allocates and initializes hw and priv. */
@@ -98,7 +90,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
98out: 90out:
99 return hw; 91 return hw;
100} 92}
101EXPORT_SYMBOL(iwl_alloc_all);
102 93
103#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 94#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
104#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 95#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -219,15 +210,12 @@ int iwlcore_init_geos(struct iwl_priv *priv)
219 if (!is_channel_valid(ch)) 210 if (!is_channel_valid(ch))
220 continue; 211 continue;
221 212
222 if (is_channel_a_band(ch)) 213 sband = &priv->bands[ch->band];
223 sband = &priv->bands[IEEE80211_BAND_5GHZ];
224 else
225 sband = &priv->bands[IEEE80211_BAND_2GHZ];
226 214
227 geo_ch = &sband->channels[sband->n_channels++]; 215 geo_ch = &sband->channels[sband->n_channels++];
228 216
229 geo_ch->center_freq = 217 geo_ch->center_freq =
230 ieee80211_channel_to_frequency(ch->channel); 218 ieee80211_channel_to_frequency(ch->channel, ch->band);
231 geo_ch->max_power = ch->max_power_avg; 219 geo_ch->max_power = ch->max_power_avg;
232 geo_ch->max_antenna_gain = 0xff; 220 geo_ch->max_antenna_gain = 0xff;
233 geo_ch->hw_value = ch->channel; 221 geo_ch->hw_value = ch->channel;
@@ -275,7 +263,6 @@ int iwlcore_init_geos(struct iwl_priv *priv)
275 263
276 return 0; 264 return 0;
277} 265}
278EXPORT_SYMBOL(iwlcore_init_geos);
279 266
280/* 267/*
281 * iwlcore_free_geos - undo allocations in iwlcore_init_geos 268 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
@@ -286,7 +273,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
286 kfree(priv->ieee_rates); 273 kfree(priv->ieee_rates);
287 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 274 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
288} 275}
289EXPORT_SYMBOL(iwlcore_free_geos);
290 276
291static bool iwl_is_channel_extension(struct iwl_priv *priv, 277static bool iwl_is_channel_extension(struct iwl_priv *priv,
292 enum ieee80211_band band, 278 enum ieee80211_band band,
@@ -331,7 +317,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
331 le16_to_cpu(ctx->staging.channel), 317 le16_to_cpu(ctx->staging.channel),
332 ctx->ht.extension_chan_offset); 318 ctx->ht.extension_chan_offset);
333} 319}
334EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
335 320
336static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 321static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
337{ 322{
@@ -432,7 +417,6 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
432 return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd, 417 return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
433 sizeof(ctx->timing), &ctx->timing); 418 sizeof(ctx->timing), &ctx->timing);
434} 419}
435EXPORT_SYMBOL(iwl_send_rxon_timing);
436 420
437void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 421void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
438 int hw_decrypt) 422 int hw_decrypt)
@@ -445,7 +429,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
445 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; 429 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
446 430
447} 431}
448EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
449 432
450/* validate RXON structure is valid */ 433/* validate RXON structure is valid */
451int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 434int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
@@ -518,7 +501,6 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
518 } 501 }
519 return 0; 502 return 0;
520} 503}
521EXPORT_SYMBOL(iwl_check_rxon_cmd);
522 504
523/** 505/**
524 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 506 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
@@ -582,7 +564,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv,
582 564
583 return 0; 565 return 0;
584} 566}
585EXPORT_SYMBOL(iwl_full_rxon_required);
586 567
587u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv, 568u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
588 struct iwl_rxon_context *ctx) 569 struct iwl_rxon_context *ctx)
@@ -596,7 +577,6 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
596 else 577 else
597 return IWL_RATE_6M_PLCP; 578 return IWL_RATE_6M_PLCP;
598} 579}
599EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
600 580
601static void _iwl_set_rxon_ht(struct iwl_priv *priv, 581static void _iwl_set_rxon_ht(struct iwl_priv *priv,
602 struct iwl_ht_config *ht_conf, 582 struct iwl_ht_config *ht_conf,
@@ -673,7 +653,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
673 for_each_context(priv, ctx) 653 for_each_context(priv, ctx)
674 _iwl_set_rxon_ht(priv, ht_conf, ctx); 654 _iwl_set_rxon_ht(priv, ht_conf, ctx);
675} 655}
676EXPORT_SYMBOL(iwl_set_rxon_ht);
677 656
678/* Return valid, unused, channel for a passive scan to reset the RF */ 657/* Return valid, unused, channel for a passive scan to reset the RF */
679u8 iwl_get_single_channel_number(struct iwl_priv *priv, 658u8 iwl_get_single_channel_number(struct iwl_priv *priv,
@@ -714,7 +693,6 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
714 693
715 return channel; 694 return channel;
716} 695}
717EXPORT_SYMBOL(iwl_get_single_channel_number);
718 696
719/** 697/**
720 * iwl_set_rxon_channel - Set the band and channel values in staging RXON 698 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
@@ -745,7 +723,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
745 723
746 return 0; 724 return 0;
747} 725}
748EXPORT_SYMBOL(iwl_set_rxon_channel);
749 726
750void iwl_set_flags_for_band(struct iwl_priv *priv, 727void iwl_set_flags_for_band(struct iwl_priv *priv,
751 struct iwl_rxon_context *ctx, 728 struct iwl_rxon_context *ctx,
@@ -769,7 +746,6 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
769 ctx->staging.flags &= ~RXON_FLG_CCK_MSK; 746 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
770 } 747 }
771} 748}
772EXPORT_SYMBOL(iwl_set_flags_for_band);
773 749
774/* 750/*
775 * initialize rxon structure with default values from eeprom 751 * initialize rxon structure with default values from eeprom
@@ -841,7 +817,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
841 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; 817 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
842 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; 818 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
843} 819}
844EXPORT_SYMBOL(iwl_connection_init_rx_config);
845 820
846void iwl_set_rate(struct iwl_priv *priv) 821void iwl_set_rate(struct iwl_priv *priv)
847{ 822{
@@ -874,7 +849,6 @@ void iwl_set_rate(struct iwl_priv *priv)
874 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 849 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
875 } 850 }
876} 851}
877EXPORT_SYMBOL(iwl_set_rate);
878 852
879void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) 853void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
880{ 854{
@@ -894,35 +868,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
894 mutex_unlock(&priv->mutex); 868 mutex_unlock(&priv->mutex);
895 } 869 }
896} 870}
897EXPORT_SYMBOL(iwl_chswitch_done);
898
899void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
900{
901 struct iwl_rx_packet *pkt = rxb_addr(rxb);
902 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
903 /*
904 * MULTI-FIXME
905 * See iwl_mac_channel_switch.
906 */
907 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
908 struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
909
910 if (priv->switch_rxon.switch_in_progress) {
911 if (!le32_to_cpu(csa->status) &&
912 (csa->channel == priv->switch_rxon.channel)) {
913 rxon->channel = csa->channel;
914 ctx->staging.channel = csa->channel;
915 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
916 le16_to_cpu(csa->channel));
917 iwl_chswitch_done(priv, true);
918 } else {
919 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
920 le16_to_cpu(csa->channel));
921 iwl_chswitch_done(priv, false);
922 }
923 }
924}
925EXPORT_SYMBOL(iwl_rx_csa);
926 871
927#ifdef CONFIG_IWLWIFI_DEBUG 872#ifdef CONFIG_IWLWIFI_DEBUG
928void iwl_print_rx_config_cmd(struct iwl_priv *priv, 873void iwl_print_rx_config_cmd(struct iwl_priv *priv,
@@ -944,13 +889,15 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
944 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 889 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
945 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 890 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
946} 891}
947EXPORT_SYMBOL(iwl_print_rx_config_cmd);
948#endif 892#endif
949/** 893/**
950 * iwl_irq_handle_error - called for HW or SW error interrupt from card 894 * iwl_irq_handle_error - called for HW or SW error interrupt from card
951 */ 895 */
952void iwl_irq_handle_error(struct iwl_priv *priv) 896void iwl_irq_handle_error(struct iwl_priv *priv)
953{ 897{
898 unsigned int reload_msec;
899 unsigned long reload_jiffies;
900
954 /* Set the FW error flag -- cleared on iwl_down */ 901 /* Set the FW error flag -- cleared on iwl_down */
955 set_bit(STATUS_FW_ERROR, &priv->status); 902 set_bit(STATUS_FW_ERROR, &priv->status);
956 903
@@ -994,6 +941,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
994 * commands by clearing the INIT status bit */ 941 * commands by clearing the INIT status bit */
995 clear_bit(STATUS_READY, &priv->status); 942 clear_bit(STATUS_READY, &priv->status);
996 943
944 /*
945 * If firmware keep reloading, then it indicate something
946 * serious wrong and firmware having problem to recover
947 * from it. Instead of keep trying which will fill the syslog
948 * and hang the system, let's just stop it
949 */
950 reload_jiffies = jiffies;
951 reload_msec = jiffies_to_msecs((long) reload_jiffies -
952 (long) priv->reload_jiffies);
953 priv->reload_jiffies = reload_jiffies;
954 if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
955 priv->reload_count++;
956 if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
957 IWL_ERR(priv, "BUG_ON, Stop restarting\n");
958 return;
959 }
960 } else
961 priv->reload_count = 0;
962
997 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { 963 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
998 IWL_DEBUG(priv, IWL_DL_FW_ERRORS, 964 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
999 "Restarting adapter due to uCode error.\n"); 965 "Restarting adapter due to uCode error.\n");
@@ -1002,7 +968,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1002 queue_work(priv->workqueue, &priv->restart); 968 queue_work(priv->workqueue, &priv->restart);
1003 } 969 }
1004} 970}
1005EXPORT_SYMBOL(iwl_irq_handle_error);
1006 971
1007static int iwl_apm_stop_master(struct iwl_priv *priv) 972static int iwl_apm_stop_master(struct iwl_priv *priv)
1008{ 973{
@@ -1039,7 +1004,6 @@ void iwl_apm_stop(struct iwl_priv *priv)
1039 */ 1004 */
1040 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1005 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1041} 1006}
1042EXPORT_SYMBOL(iwl_apm_stop);
1043 1007
1044 1008
1045/* 1009/*
@@ -1154,13 +1118,14 @@ int iwl_apm_init(struct iwl_priv *priv)
1154out: 1118out:
1155 return ret; 1119 return ret;
1156} 1120}
1157EXPORT_SYMBOL(iwl_apm_init);
1158 1121
1159 1122
1160int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 1123int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1161{ 1124{
1162 int ret; 1125 int ret;
1163 s8 prev_tx_power; 1126 s8 prev_tx_power;
1127 bool defer;
1128 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1164 1129
1165 lockdep_assert_held(&priv->mutex); 1130 lockdep_assert_held(&priv->mutex);
1166 1131
@@ -1188,10 +1153,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1188 if (!iwl_is_ready_rf(priv)) 1153 if (!iwl_is_ready_rf(priv))
1189 return -EIO; 1154 return -EIO;
1190 1155
1191 /* scan complete use tx_power_next, need to be updated */ 1156 /* scan complete and commit_rxon use tx_power_next value,
1157 * it always need to be updated for newest request */
1192 priv->tx_power_next = tx_power; 1158 priv->tx_power_next = tx_power;
1193 if (test_bit(STATUS_SCANNING, &priv->status) && !force) { 1159
1194 IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n"); 1160 /* do not set tx power when scanning or channel changing */
1161 defer = test_bit(STATUS_SCANNING, &priv->status) ||
1162 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
1163 if (defer && !force) {
1164 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
1195 return 0; 1165 return 0;
1196 } 1166 }
1197 1167
@@ -1207,7 +1177,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1207 } 1177 }
1208 return ret; 1178 return ret;
1209} 1179}
1210EXPORT_SYMBOL(iwl_set_tx_power);
1211 1180
1212void iwl_send_bt_config(struct iwl_priv *priv) 1181void iwl_send_bt_config(struct iwl_priv *priv)
1213{ 1182{
@@ -1231,7 +1200,6 @@ void iwl_send_bt_config(struct iwl_priv *priv)
1231 sizeof(struct iwl_bt_cmd), &bt_cmd)) 1200 sizeof(struct iwl_bt_cmd), &bt_cmd))
1232 IWL_ERR(priv, "failed to send BT Coex Config\n"); 1201 IWL_ERR(priv, "failed to send BT Coex Config\n");
1233} 1202}
1234EXPORT_SYMBOL(iwl_send_bt_config);
1235 1203
1236int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) 1204int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1237{ 1205{
@@ -1249,46 +1217,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1249 sizeof(struct iwl_statistics_cmd), 1217 sizeof(struct iwl_statistics_cmd),
1250 &statistics_cmd); 1218 &statistics_cmd);
1251} 1219}
1252EXPORT_SYMBOL(iwl_send_statistics_request);
1253
1254void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1255 struct iwl_rx_mem_buffer *rxb)
1256{
1257#ifdef CONFIG_IWLWIFI_DEBUG
1258 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1259 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
1260 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
1261 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1262#endif
1263}
1264EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
1265
1266void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1267 struct iwl_rx_mem_buffer *rxb)
1268{
1269 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1270 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1271 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1272 "notification for %s:\n", len,
1273 get_cmd_string(pkt->hdr.cmd));
1274 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1275}
1276EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
1277
1278void iwl_rx_reply_error(struct iwl_priv *priv,
1279 struct iwl_rx_mem_buffer *rxb)
1280{
1281 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1282
1283 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
1284 "seq 0x%04X ser 0x%08X\n",
1285 le32_to_cpu(pkt->u.err_resp.error_type),
1286 get_cmd_string(pkt->u.err_resp.cmd_id),
1287 pkt->u.err_resp.cmd_id,
1288 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1289 le32_to_cpu(pkt->u.err_resp.error_info));
1290}
1291EXPORT_SYMBOL(iwl_rx_reply_error);
1292 1220
1293void iwl_clear_isr_stats(struct iwl_priv *priv) 1221void iwl_clear_isr_stats(struct iwl_priv *priv)
1294{ 1222{
@@ -1340,7 +1268,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1340 IWL_DEBUG_MAC80211(priv, "leave\n"); 1268 IWL_DEBUG_MAC80211(priv, "leave\n");
1341 return 0; 1269 return 0;
1342} 1270}
1343EXPORT_SYMBOL(iwl_mac_conf_tx);
1344 1271
1345int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw) 1272int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1346{ 1273{
@@ -1348,7 +1275,6 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1348 1275
1349 return priv->ibss_manager == IWL_IBSS_MANAGER; 1276 return priv->ibss_manager == IWL_IBSS_MANAGER;
1350} 1277}
1351EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
1352 1278
1353static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1279static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1354{ 1280{
@@ -1403,9 +1329,10 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1403 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1329 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1404 struct iwl_rxon_context *tmp, *ctx = NULL; 1330 struct iwl_rxon_context *tmp, *ctx = NULL;
1405 int err; 1331 int err;
1332 enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
1406 1333
1407 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", 1334 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1408 vif->type, vif->addr); 1335 viftype, vif->addr);
1409 1336
1410 mutex_lock(&priv->mutex); 1337 mutex_lock(&priv->mutex);
1411 1338
@@ -1429,7 +1356,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1429 continue; 1356 continue;
1430 } 1357 }
1431 1358
1432 if (!(possible_modes & BIT(vif->type))) 1359 if (!(possible_modes & BIT(viftype)))
1433 continue; 1360 continue;
1434 1361
1435 /* have maybe usable context w/o interface */ 1362 /* have maybe usable context w/o interface */
@@ -1457,7 +1384,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1457 IWL_DEBUG_MAC80211(priv, "leave\n"); 1384 IWL_DEBUG_MAC80211(priv, "leave\n");
1458 return err; 1385 return err;
1459} 1386}
1460EXPORT_SYMBOL(iwl_mac_add_interface);
1461 1387
1462static void iwl_teardown_interface(struct iwl_priv *priv, 1388static void iwl_teardown_interface(struct iwl_priv *priv,
1463 struct ieee80211_vif *vif, 1389 struct ieee80211_vif *vif,
@@ -1510,7 +1436,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1510 IWL_DEBUG_MAC80211(priv, "leave\n"); 1436 IWL_DEBUG_MAC80211(priv, "leave\n");
1511 1437
1512} 1438}
1513EXPORT_SYMBOL(iwl_mac_remove_interface);
1514 1439
1515int iwl_alloc_txq_mem(struct iwl_priv *priv) 1440int iwl_alloc_txq_mem(struct iwl_priv *priv)
1516{ 1441{
@@ -1525,14 +1450,12 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
1525 } 1450 }
1526 return 0; 1451 return 0;
1527} 1452}
1528EXPORT_SYMBOL(iwl_alloc_txq_mem);
1529 1453
1530void iwl_free_txq_mem(struct iwl_priv *priv) 1454void iwl_free_txq_mem(struct iwl_priv *priv)
1531{ 1455{
1532 kfree(priv->txq); 1456 kfree(priv->txq);
1533 priv->txq = NULL; 1457 priv->txq = NULL;
1534} 1458}
1535EXPORT_SYMBOL(iwl_free_txq_mem);
1536 1459
1537#ifdef CONFIG_IWLWIFI_DEBUGFS 1460#ifdef CONFIG_IWLWIFI_DEBUGFS
1538 1461
@@ -1571,7 +1494,6 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1571 iwl_reset_traffic_log(priv); 1494 iwl_reset_traffic_log(priv);
1572 return 0; 1495 return 0;
1573} 1496}
1574EXPORT_SYMBOL(iwl_alloc_traffic_mem);
1575 1497
1576void iwl_free_traffic_mem(struct iwl_priv *priv) 1498void iwl_free_traffic_mem(struct iwl_priv *priv)
1577{ 1499{
@@ -1581,7 +1503,6 @@ void iwl_free_traffic_mem(struct iwl_priv *priv)
1581 kfree(priv->rx_traffic); 1503 kfree(priv->rx_traffic);
1582 priv->rx_traffic = NULL; 1504 priv->rx_traffic = NULL;
1583} 1505}
1584EXPORT_SYMBOL(iwl_free_traffic_mem);
1585 1506
1586void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, 1507void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1587 u16 length, struct ieee80211_hdr *header) 1508 u16 length, struct ieee80211_hdr *header)
@@ -1606,7 +1527,6 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1606 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; 1527 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1607 } 1528 }
1608} 1529}
1609EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
1610 1530
1611void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, 1531void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1612 u16 length, struct ieee80211_hdr *header) 1532 u16 length, struct ieee80211_hdr *header)
@@ -1631,7 +1551,6 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1631 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; 1551 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1632 } 1552 }
1633} 1553}
1634EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
1635 1554
1636const char *get_mgmt_string(int cmd) 1555const char *get_mgmt_string(int cmd)
1637{ 1556{
@@ -1675,7 +1594,6 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv)
1675{ 1594{
1676 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); 1595 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1677 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); 1596 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1678 priv->led_tpt = 0;
1679} 1597}
1680 1598
1681/* 1599/*
@@ -1768,9 +1686,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1768 stats->data_cnt++; 1686 stats->data_cnt++;
1769 stats->data_bytes += len; 1687 stats->data_bytes += len;
1770 } 1688 }
1771 iwl_leds_background(priv);
1772} 1689}
1773EXPORT_SYMBOL(iwl_update_stats);
1774#endif 1690#endif
1775 1691
1776static void iwl_force_rf_reset(struct iwl_priv *priv) 1692static void iwl_force_rf_reset(struct iwl_priv *priv)
@@ -1909,7 +1825,6 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1909 mutex_unlock(&priv->mutex); 1825 mutex_unlock(&priv->mutex);
1910 return err; 1826 return err;
1911} 1827}
1912EXPORT_SYMBOL(iwl_mac_change_interface);
1913 1828
1914/* 1829/*
1915 * On every watchdog tick we check (latest) time stamp. If it does not 1830 * On every watchdog tick we check (latest) time stamp. If it does not
@@ -1981,7 +1896,6 @@ void iwl_bg_watchdog(unsigned long data)
1981 mod_timer(&priv->watchdog, jiffies + 1896 mod_timer(&priv->watchdog, jiffies +
1982 msecs_to_jiffies(IWL_WD_TICK(timeout))); 1897 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1983} 1898}
1984EXPORT_SYMBOL(iwl_bg_watchdog);
1985 1899
1986void iwl_setup_watchdog(struct iwl_priv *priv) 1900void iwl_setup_watchdog(struct iwl_priv *priv)
1987{ 1901{
@@ -1993,7 +1907,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
1993 else 1907 else
1994 del_timer(&priv->watchdog); 1908 del_timer(&priv->watchdog);
1995} 1909}
1996EXPORT_SYMBOL(iwl_setup_watchdog);
1997 1910
1998/* 1911/*
1999 * extended beacon time format 1912 * extended beacon time format
@@ -2019,7 +1932,6 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
2019 1932
2020 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; 1933 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
2021} 1934}
2022EXPORT_SYMBOL(iwl_usecs_to_beacons);
2023 1935
2024/* base is usually what we get from ucode with each received frame, 1936/* base is usually what we get from ucode with each received frame,
2025 * the same as HW timer counter counting down 1937 * the same as HW timer counter counting down
@@ -2047,7 +1959,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
2047 1959
2048 return cpu_to_le32(res); 1960 return cpu_to_le32(res);
2049} 1961}
2050EXPORT_SYMBOL(iwl_add_beacon_time);
2051 1962
2052#ifdef CONFIG_PM 1963#ifdef CONFIG_PM
2053 1964
@@ -2067,7 +1978,6 @@ int iwl_pci_suspend(struct device *device)
2067 1978
2068 return 0; 1979 return 0;
2069} 1980}
2070EXPORT_SYMBOL(iwl_pci_suspend);
2071 1981
2072int iwl_pci_resume(struct device *device) 1982int iwl_pci_resume(struct device *device)
2073{ 1983{
@@ -2096,7 +2006,6 @@ int iwl_pci_resume(struct device *device)
2096 2006
2097 return 0; 2007 return 0;
2098} 2008}
2099EXPORT_SYMBOL(iwl_pci_resume);
2100 2009
2101const struct dev_pm_ops iwl_pm_ops = { 2010const struct dev_pm_ops iwl_pm_ops = {
2102 .suspend = iwl_pci_suspend, 2011 .suspend = iwl_pci_suspend,
@@ -2106,6 +2015,5 @@ const struct dev_pm_ops iwl_pm_ops = {
2106 .poweroff = iwl_pci_suspend, 2015 .poweroff = iwl_pci_suspend,
2107 .restore = iwl_pci_resume, 2016 .restore = iwl_pci_resume,
2108}; 2017};
2109EXPORT_SYMBOL(iwl_pm_ops);
2110 2018
2111#endif /* CONFIG_PM */ 2019#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index a3474376fdbc..b316d833d9a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,6 +63,8 @@
63#ifndef __iwl_core_h__ 63#ifndef __iwl_core_h__
64#define __iwl_core_h__ 64#define __iwl_core_h__
65 65
66#include "iwl-dev.h"
67
66/************************ 68/************************
67 * forward declarations * 69 * forward declarations *
68 ************************/ 70 ************************/
@@ -210,12 +212,7 @@ struct iwl_lib_ops {
210 212
211 /* temperature */ 213 /* temperature */
212 struct iwl_temp_ops temp_ops; 214 struct iwl_temp_ops temp_ops;
213 /* check for plcp health */ 215
214 bool (*check_plcp_health)(struct iwl_priv *priv,
215 struct iwl_rx_packet *pkt);
216 /* check for ack health */
217 bool (*check_ack_health)(struct iwl_priv *priv,
218 struct iwl_rx_packet *pkt);
219 int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control); 216 int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
220 void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control); 217 void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
221 218
@@ -227,8 +224,6 @@ struct iwl_lib_ops {
227 224
228struct iwl_led_ops { 225struct iwl_led_ops {
229 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); 226 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
230 int (*on)(struct iwl_priv *priv);
231 int (*off)(struct iwl_priv *priv);
232}; 227};
233 228
234/* NIC specific ops */ 229/* NIC specific ops */
@@ -263,6 +258,8 @@ struct iwl_mod_params {
263 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 258 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
264 int antenna; /* def: 0 = both antennas (use diversity) */ 259 int antenna; /* def: 0 = both antennas (use diversity) */
265 int restart_fw; /* def: 1 = restart firmware */ 260 int restart_fw; /* def: 1 = restart firmware */
261 bool plcp_check; /* def: true = enable plcp health check */
262 bool ack_check; /* def: false = disable ack health check */
266}; 263};
267 264
268/* 265/*
@@ -307,7 +304,6 @@ struct iwl_base_params {
307 u16 led_compensation; 304 u16 led_compensation;
308 const bool broken_powersave; 305 const bool broken_powersave;
309 int chain_noise_num_beacons; 306 int chain_noise_num_beacons;
310 const bool supports_idle;
311 bool adv_thermal_throttle; 307 bool adv_thermal_throttle;
312 bool support_ct_kill_exit; 308 bool support_ct_kill_exit;
313 const bool support_wimax_coexist; 309 const bool support_wimax_coexist;
@@ -342,6 +338,7 @@ struct iwl_bt_params {
342 u8 ampdu_factor; 338 u8 ampdu_factor;
343 u8 ampdu_density; 339 u8 ampdu_density;
344 bool bt_sco_disable; 340 bool bt_sco_disable;
341 bool bt_session_2;
345}; 342};
346/* 343/*
347 * @use_rts_for_aggregation: use rts/cts protection for HT traffic 344 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -366,6 +363,7 @@ struct iwl_ht_params {
366 * @adv_pm: advance power management 363 * @adv_pm: advance power management
367 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 364 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
368 * @internal_wimax_coex: internal wifi/wimax combo device 365 * @internal_wimax_coex: internal wifi/wimax combo device
366 * @iq_invert: I/Q inversion
369 * 367 *
370 * We enable the driver to be backward compatible wrt API version. The 368 * We enable the driver to be backward compatible wrt API version. The
371 * driver specifies which APIs it supports (with @ucode_api_max being the 369 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -415,6 +413,7 @@ struct iwl_cfg {
415 const bool adv_pm; 413 const bool adv_pm;
416 const bool rx_with_siso_diversity; 414 const bool rx_with_siso_diversity;
417 const bool internal_wimax_coex; 415 const bool internal_wimax_coex;
416 const bool iq_invert;
418}; 417};
419 418
420/*************************** 419/***************************
@@ -444,10 +443,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
444void iwl_connection_init_rx_config(struct iwl_priv *priv, 443void iwl_connection_init_rx_config(struct iwl_priv *priv,
445 struct iwl_rxon_context *ctx); 444 struct iwl_rxon_context *ctx);
446void iwl_set_rate(struct iwl_priv *priv); 445void iwl_set_rate(struct iwl_priv *priv);
447int iwl_set_decrypted_flag(struct iwl_priv *priv,
448 struct ieee80211_hdr *hdr,
449 u32 decrypt_res,
450 struct ieee80211_rx_status *stats);
451void iwl_irq_handle_error(struct iwl_priv *priv); 446void iwl_irq_handle_error(struct iwl_priv *priv);
452int iwl_mac_add_interface(struct ieee80211_hw *hw, 447int iwl_mac_add_interface(struct ieee80211_hw *hw,
453 struct ieee80211_vif *vif); 448 struct ieee80211_vif *vif);
@@ -494,46 +489,21 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
494static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx, 489static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
495 __le16 fc, u16 len) 490 __le16 fc, u16 len)
496{ 491{
497 struct traffic_stats *stats;
498
499 if (is_tx)
500 stats = &priv->tx_stats;
501 else
502 stats = &priv->rx_stats;
503
504 if (ieee80211_is_data(fc)) {
505 /* data */
506 stats->data_bytes += len;
507 }
508 iwl_leds_background(priv);
509} 492}
510#endif 493#endif
511/*****************************************************
512 * RX handlers.
513 * **************************************************/
514void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
515 struct iwl_rx_mem_buffer *rxb);
516void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
517 struct iwl_rx_mem_buffer *rxb);
518void iwl_rx_reply_error(struct iwl_priv *priv,
519 struct iwl_rx_mem_buffer *rxb);
520 494
521/***************************************************** 495/*****************************************************
522* RX 496* RX
523******************************************************/ 497******************************************************/
524void iwl_cmd_queue_free(struct iwl_priv *priv); 498void iwl_cmd_queue_free(struct iwl_priv *priv);
499void iwl_cmd_queue_unmap(struct iwl_priv *priv);
525int iwl_rx_queue_alloc(struct iwl_priv *priv); 500int iwl_rx_queue_alloc(struct iwl_priv *priv);
526void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 501void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
527 struct iwl_rx_queue *q); 502 struct iwl_rx_queue *q);
528int iwl_rx_queue_space(const struct iwl_rx_queue *q); 503int iwl_rx_queue_space(const struct iwl_rx_queue *q);
529void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 504void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
530/* Handlers */ 505
531void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
532 struct iwl_rx_mem_buffer *rxb);
533void iwl_recover_from_statistics(struct iwl_priv *priv,
534 struct iwl_rx_packet *pkt);
535void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); 506void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
536void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
537 507
538/* TX helpers */ 508/* TX helpers */
539 509
@@ -546,6 +516,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
546void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 516void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
547 int slots_num, u32 txq_id); 517 int slots_num, u32 txq_id);
548void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 518void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
519void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
549void iwl_setup_watchdog(struct iwl_priv *priv); 520void iwl_setup_watchdog(struct iwl_priv *priv);
550/***************************************************** 521/*****************************************************
551 * TX power 522 * TX power
@@ -582,6 +553,10 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
582 struct ieee80211_vif *vif); 553 struct ieee80211_vif *vif);
583void iwl_setup_scan_deferred_work(struct iwl_priv *priv); 554void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
584void iwl_cancel_scan_deferred_work(struct iwl_priv *priv); 555void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
556int __must_check iwl_scan_initiate(struct iwl_priv *priv,
557 struct ieee80211_vif *vif,
558 enum iwl_scan_type scan_type,
559 enum ieee80211_band band);
585 560
586/* For faster active scanning, scan will move to the next channel if fewer than 561/* For faster active scanning, scan will move to the next channel if fewer than
587 * PLCP_QUIET_THRESH packets are heard on this channel within 562 * PLCP_QUIET_THRESH packets are heard on this channel within
@@ -755,6 +730,17 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
755 return priv->hw->wiphy->bands[band]; 730 return priv->hw->wiphy->bands[band];
756} 731}
757 732
733static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
734{
735 return priv->cfg->bt_params &&
736 priv->cfg->bt_params->advanced_bt_coexist;
737}
738
739static inline bool iwl_bt_statistics(struct iwl_priv *priv)
740{
741 return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
742}
743
758extern bool bt_coex_active; 744extern bool bt_coex_active;
759extern bool bt_siso_mode; 745extern bool bt_siso_mode;
760 746
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b80bf7dff55b..f52bc040bcbf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -290,7 +290,7 @@
290 290
291 291
292/* HW REV */ 292/* HW REV */
293#define CSR_HW_REV_TYPE_MSK (0x00000F0) 293#define CSR_HW_REV_TYPE_MSK (0x00001F0)
294#define CSR_HW_REV_TYPE_3945 (0x00000D0) 294#define CSR_HW_REV_TYPE_3945 (0x00000D0)
295#define CSR_HW_REV_TYPE_4965 (0x0000000) 295#define CSR_HW_REV_TYPE_4965 (0x0000000)
296#define CSR_HW_REV_TYPE_5300 (0x0000020) 296#define CSR_HW_REV_TYPE_5300 (0x0000020)
@@ -300,9 +300,15 @@
300#define CSR_HW_REV_TYPE_1000 (0x0000060) 300#define CSR_HW_REV_TYPE_1000 (0x0000060)
301#define CSR_HW_REV_TYPE_6x00 (0x0000070) 301#define CSR_HW_REV_TYPE_6x00 (0x0000070)
302#define CSR_HW_REV_TYPE_6x50 (0x0000080) 302#define CSR_HW_REV_TYPE_6x50 (0x0000080)
303#define CSR_HW_REV_TYPE_6x50g2 (0x0000084) 303#define CSR_HW_REV_TYPE_6150 (0x0000084)
304#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0) 304#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
305#define CSR_HW_REV_TYPE_NONE (0x00000F0) 305#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
306#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
307#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
308#define CSR_HW_REV_TYPE_2x00 (0x0000100)
309#define CSR_HW_REV_TYPE_200 (0x0000110)
310#define CSR_HW_REV_TYPE_230 (0x0000120)
311#define CSR_HW_REV_TYPE_NONE (0x00001F0)
306 312
307/* EEPROM REG */ 313/* EEPROM REG */
308#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 314#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
@@ -376,6 +382,8 @@
376#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) 382#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
377#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008) 383#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008)
378 384
385#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080)
386
379/* GIO Chicken Bits (PCI Express bus link power management) */ 387/* GIO Chicken Bits (PCI Express bus link power management) */
380#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) 388#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
381#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) 389#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 6fe80b5e7a15..8842411f1cf3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -207,18 +207,19 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
207 return ret; 207 return ret;
208} 208}
209 209
210#define BYTE1_MASK 0x000000ff;
211#define BYTE2_MASK 0x0000ffff;
212#define BYTE3_MASK 0x00ffffff;
213static ssize_t iwl_dbgfs_sram_read(struct file *file, 210static ssize_t iwl_dbgfs_sram_read(struct file *file,
214 char __user *user_buf, 211 char __user *user_buf,
215 size_t count, loff_t *ppos) 212 size_t count, loff_t *ppos)
216{ 213{
217 u32 val; 214 u32 val = 0;
218 char *buf; 215 char *buf;
219 ssize_t ret; 216 ssize_t ret;
220 int i; 217 int i = 0;
218 bool device_format = false;
219 int offset = 0;
220 int len = 0;
221 int pos = 0; 221 int pos = 0;
222 int sram;
222 struct iwl_priv *priv = file->private_data; 223 struct iwl_priv *priv = file->private_data;
223 size_t bufsz; 224 size_t bufsz;
224 225
@@ -230,35 +231,62 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
230 else 231 else
231 priv->dbgfs_sram_len = priv->ucode_data.len; 232 priv->dbgfs_sram_len = priv->ucode_data.len;
232 } 233 }
233 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; 234 len = priv->dbgfs_sram_len;
235
236 if (len == -4) {
237 device_format = true;
238 len = 4;
239 }
240
241 bufsz = 50 + len * 4;
234 buf = kmalloc(bufsz, GFP_KERNEL); 242 buf = kmalloc(bufsz, GFP_KERNEL);
235 if (!buf) 243 if (!buf)
236 return -ENOMEM; 244 return -ENOMEM;
245
237 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", 246 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
238 priv->dbgfs_sram_len); 247 len);
239 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", 248 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
240 priv->dbgfs_sram_offset); 249 priv->dbgfs_sram_offset);
241 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { 250
242 val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \ 251 /* adjust sram address since reads are only on even u32 boundaries */
243 priv->dbgfs_sram_len - i); 252 offset = priv->dbgfs_sram_offset & 0x3;
244 if (i < 4) { 253 sram = priv->dbgfs_sram_offset & ~0x3;
245 switch (i) { 254
246 case 1: 255 /* read the first u32 from sram */
247 val &= BYTE1_MASK; 256 val = iwl_read_targ_mem(priv, sram);
248 break; 257
249 case 2: 258 for (; len; len--) {
250 val &= BYTE2_MASK; 259 /* put the address at the start of every line */
251 break; 260 if (i == 0)
252 case 3: 261 pos += scnprintf(buf + pos, bufsz - pos,
253 val &= BYTE3_MASK; 262 "%08X: ", sram + offset);
254 break; 263
255 } 264 if (device_format)
265 pos += scnprintf(buf + pos, bufsz - pos,
266 "%02x", (val >> (8 * (3 - offset))) & 0xff);
267 else
268 pos += scnprintf(buf + pos, bufsz - pos,
269 "%02x ", (val >> (8 * offset)) & 0xff);
270
271 /* if all bytes processed, read the next u32 from sram */
272 if (++offset == 4) {
273 sram += 4;
274 offset = 0;
275 val = iwl_read_targ_mem(priv, sram);
256 } 276 }
257 if (!(i % 16)) 277
278 /* put in extra spaces and split lines for human readability */
279 if (++i == 16) {
280 i = 0;
258 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 281 pos += scnprintf(buf + pos, bufsz - pos, "\n");
259 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); 282 } else if (!(i & 7)) {
283 pos += scnprintf(buf + pos, bufsz - pos, " ");
284 } else if (!(i & 3)) {
285 pos += scnprintf(buf + pos, bufsz - pos, " ");
286 }
260 } 287 }
261 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 288 if (i)
289 pos += scnprintf(buf + pos, bufsz - pos, "\n");
262 290
263 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 291 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
264 kfree(buf); 292 kfree(buf);
@@ -282,6 +310,9 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
282 if (sscanf(buf, "%x,%x", &offset, &len) == 2) { 310 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
283 priv->dbgfs_sram_offset = offset; 311 priv->dbgfs_sram_offset = offset;
284 priv->dbgfs_sram_len = len; 312 priv->dbgfs_sram_len = len;
313 } else if (sscanf(buf, "%x", &offset) == 1) {
314 priv->dbgfs_sram_offset = offset;
315 priv->dbgfs_sram_len = -4;
285 } else { 316 } else {
286 priv->dbgfs_sram_offset = 0; 317 priv->dbgfs_sram_offset = 0;
287 priv->dbgfs_sram_len = 0; 318 priv->dbgfs_sram_len = 0;
@@ -668,29 +699,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
668 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 699 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
669} 700}
670 701
671static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
672 size_t count, loff_t *ppos)
673{
674 struct iwl_priv *priv = file->private_data;
675 int pos = 0;
676 char buf[256];
677 const size_t bufsz = sizeof(buf);
678
679 pos += scnprintf(buf + pos, bufsz - pos,
680 "allow blinking: %s\n",
681 (priv->allow_blinking) ? "True" : "False");
682 if (priv->allow_blinking) {
683 pos += scnprintf(buf + pos, bufsz - pos,
684 "Led blinking rate: %u\n",
685 priv->last_blink_rate);
686 pos += scnprintf(buf + pos, bufsz - pos,
687 "Last blink time: %lu\n",
688 priv->last_blink_time);
689 }
690
691 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
692}
693
694static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, 702static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
695 char __user *user_buf, 703 char __user *user_buf,
696 size_t count, loff_t *ppos) 704 size_t count, loff_t *ppos)
@@ -856,7 +864,6 @@ DEBUGFS_READ_FILE_OPS(channels);
856DEBUGFS_READ_FILE_OPS(status); 864DEBUGFS_READ_FILE_OPS(status);
857DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 865DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
858DEBUGFS_READ_FILE_OPS(qos); 866DEBUGFS_READ_FILE_OPS(qos);
859DEBUGFS_READ_FILE_OPS(led);
860DEBUGFS_READ_FILE_OPS(thermal_throttling); 867DEBUGFS_READ_FILE_OPS(thermal_throttling);
861DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); 868DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
862DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); 869DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -1580,10 +1587,9 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
1580 "last traffic notif: %d\n", 1587 "last traffic notif: %d\n",
1581 priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load); 1588 priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
1582 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, " 1589 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
1583 "sco_active: %d, kill_ack_mask: %x, " 1590 "kill_ack_mask: %x, kill_cts_mask: %x\n",
1584 "kill_cts_mask: %x\n", 1591 priv->bt_ch_announce, priv->kill_ack_mask,
1585 priv->bt_ch_announce, priv->bt_sco_active, 1592 priv->kill_cts_mask);
1586 priv->kill_ack_mask, priv->kill_cts_mask);
1587 1593
1588 pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: "); 1594 pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
1589 switch (priv->bt_traffic_load) { 1595 switch (priv->bt_traffic_load) {
@@ -1725,7 +1731,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1725 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); 1731 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1726 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); 1732 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1727 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); 1733 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1728 DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
1729 if (!priv->cfg->base_params->broken_powersave) { 1734 if (!priv->cfg->base_params->broken_powersave) {
1730 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, 1735 DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
1731 S_IWUSR | S_IRUSR); 1736 S_IWUSR | S_IRUSR);
@@ -1759,13 +1764,13 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1759 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); 1764 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1760 if (priv->cfg->base_params->ucode_tracing) 1765 if (priv->cfg->base_params->ucode_tracing)
1761 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); 1766 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1762 if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics) 1767 if (iwl_bt_statistics(priv))
1763 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); 1768 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
1764 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); 1769 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
1765 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 1770 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1766 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 1771 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1767 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); 1772 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1768 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) 1773 if (iwl_advanced_bt_coexist(priv))
1769 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); 1774 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
1770 if (priv->cfg->base_params->sensitivity_calib_by_driver) 1775 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1771 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, 1776 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
@@ -1783,7 +1788,6 @@ err:
1783 iwl_dbgfs_unregister(priv); 1788 iwl_dbgfs_unregister(priv);
1784 return -ENOMEM; 1789 return -ENOMEM;
1785} 1790}
1786EXPORT_SYMBOL(iwl_dbgfs_register);
1787 1791
1788/** 1792/**
1789 * Remove the debugfs files and directories 1793 * Remove the debugfs files and directories
@@ -1797,7 +1801,6 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1797 debugfs_remove_recursive(priv->debugfs_dir); 1801 debugfs_remove_recursive(priv->debugfs_dir);
1798 priv->debugfs_dir = NULL; 1802 priv->debugfs_dir = NULL;
1799} 1803}
1800EXPORT_SYMBOL(iwl_dbgfs_unregister);
1801 1804
1802 1805
1803 1806
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 8dda67850af4..68b953f2bdc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -34,6 +34,8 @@
34 34
35#include <linux/pci.h> /* for struct pci_device_id */ 35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/wait.h>
38#include <linux/leds.h>
37#include <net/ieee80211_radiotap.h> 39#include <net/ieee80211_radiotap.h>
38 40
39#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
@@ -41,14 +43,14 @@
41#include "iwl-prph.h" 43#include "iwl-prph.h"
42#include "iwl-fh.h" 44#include "iwl-fh.h"
43#include "iwl-debug.h" 45#include "iwl-debug.h"
44#include "iwl-4965-hw.h"
45#include "iwl-3945-hw.h"
46#include "iwl-agn-hw.h" 46#include "iwl-agn-hw.h"
47#include "iwl-led.h" 47#include "iwl-led.h"
48#include "iwl-power.h" 48#include "iwl-power.h"
49#include "iwl-agn-rs.h" 49#include "iwl-agn-rs.h"
50#include "iwl-agn-tt.h" 50#include "iwl-agn-tt.h"
51 51
52#define U32_PAD(n) ((4-(n))&0x3)
53
52struct iwl_tx_queue; 54struct iwl_tx_queue;
53 55
54/* CT-KILL constants */ 56/* CT-KILL constants */
@@ -136,7 +138,7 @@ struct iwl_queue {
136 * space more than this */ 138 * space more than this */
137 int high_mark; /* high watermark, stop queue if free 139 int high_mark; /* high watermark, stop queue if free
138 * space less than this */ 140 * space less than this */
139} __packed; 141};
140 142
141/* One for each TFD */ 143/* One for each TFD */
142struct iwl_tx_info { 144struct iwl_tx_info {
@@ -507,6 +509,7 @@ struct iwl_station_priv {
507 atomic_t pending_frames; 509 atomic_t pending_frames;
508 bool client; 510 bool client;
509 bool asleep; 511 bool asleep;
512 u8 max_agg_bufsize;
510}; 513};
511 514
512/** 515/**
@@ -995,7 +998,6 @@ struct reply_agg_tx_error_statistics {
995 u32 unknown; 998 u32 unknown;
996}; 999};
997 1000
998#ifdef CONFIG_IWLWIFI_DEBUGFS
999/* management statistics */ 1001/* management statistics */
1000enum iwl_mgmt_stats { 1002enum iwl_mgmt_stats {
1001 MANAGEMENT_ASSOC_REQ = 0, 1003 MANAGEMENT_ASSOC_REQ = 0,
@@ -1026,16 +1028,13 @@ enum iwl_ctrl_stats {
1026}; 1028};
1027 1029
1028struct traffic_stats { 1030struct traffic_stats {
1031#ifdef CONFIG_IWLWIFI_DEBUGFS
1029 u32 mgmt[MANAGEMENT_MAX]; 1032 u32 mgmt[MANAGEMENT_MAX];
1030 u32 ctrl[CONTROL_MAX]; 1033 u32 ctrl[CONTROL_MAX];
1031 u32 data_cnt; 1034 u32 data_cnt;
1032 u64 data_bytes; 1035 u64 data_bytes;
1033};
1034#else
1035struct traffic_stats {
1036 u64 data_bytes;
1037};
1038#endif 1036#endif
1037};
1039 1038
1040/* 1039/*
1041 * iwl_switch_rxon: "channel switch" structure 1040 * iwl_switch_rxon: "channel switch" structure
@@ -1111,6 +1110,11 @@ struct iwl_event_log {
1111/* BT Antenna Coupling Threshold (dB) */ 1110/* BT Antenna Coupling Threshold (dB) */
1112#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) 1111#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
1113 1112
1113/* Firmware reload counter and Timestamp */
1114#define IWL_MIN_RELOAD_DURATION 1000 /* 1000 ms */
1115#define IWL_MAX_CONTINUE_RELOAD_CNT 4
1116
1117
1114enum iwl_reset { 1118enum iwl_reset {
1115 IWL_RF_RESET = 0, 1119 IWL_RF_RESET = 0,
1116 IWL_FW_RESET, 1120 IWL_FW_RESET,
@@ -1139,6 +1143,33 @@ struct iwl_force_reset {
1139 */ 1143 */
1140#define IWLAGN_EXT_BEACON_TIME_POS 22 1144#define IWLAGN_EXT_BEACON_TIME_POS 22
1141 1145
1146/**
1147 * struct iwl_notification_wait - notification wait entry
1148 * @list: list head for global list
1149 * @fn: function called with the notification
1150 * @cmd: command ID
1151 *
1152 * This structure is not used directly, to wait for a
1153 * notification declare it on the stack, and call
1154 * iwlagn_init_notification_wait() with appropriate
1155 * parameters. Then do whatever will cause the ucode
1156 * to notify the driver, and to wait for that then
1157 * call iwlagn_wait_notification().
1158 *
1159 * Each notification is one-shot. If at some point we
1160 * need to support multi-shot notifications (which
1161 * can't be allocated on the stack) we need to modify
1162 * the code for them.
1163 */
1164struct iwl_notification_wait {
1165 struct list_head list;
1166
1167 void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt);
1168
1169 u8 cmd;
1170 bool triggered;
1171};
1172
1142enum iwl_rxon_context_id { 1173enum iwl_rxon_context_id {
1143 IWL_RXON_CTX_BSS, 1174 IWL_RXON_CTX_BSS,
1144 IWL_RXON_CTX_PAN, 1175 IWL_RXON_CTX_PAN,
@@ -1199,6 +1230,12 @@ struct iwl_rxon_context {
1199 } ht; 1230 } ht;
1200}; 1231};
1201 1232
1233enum iwl_scan_type {
1234 IWL_SCAN_NORMAL,
1235 IWL_SCAN_RADIO_RESET,
1236 IWL_SCAN_OFFCH_TX,
1237};
1238
1202struct iwl_priv { 1239struct iwl_priv {
1203 1240
1204 /* ieee device used by generic ieee processing code */ 1241 /* ieee device used by generic ieee processing code */
@@ -1230,12 +1267,16 @@ struct iwl_priv {
1230 /* track IBSS manager (last beacon) status */ 1267 /* track IBSS manager (last beacon) status */
1231 u32 ibss_manager; 1268 u32 ibss_manager;
1232 1269
1233 /* storing the jiffies when the plcp error rate is received */ 1270 /* jiffies when last recovery from statistics was performed */
1234 unsigned long plcp_jiffies; 1271 unsigned long rx_statistics_jiffies;
1235 1272
1236 /* force reset */ 1273 /* force reset */
1237 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; 1274 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1238 1275
1276 /* firmware reload counter and timestamp */
1277 unsigned long reload_jiffies;
1278 int reload_count;
1279
1239 /* we allocate array of iwl_channel_info for NIC's valid channels. 1280 /* we allocate array of iwl_channel_info for NIC's valid channels.
1240 * Access via channel # using indirect index array */ 1281 * Access via channel # using indirect index array */
1241 struct iwl_channel_info *channel_info; /* channel info array */ 1282 struct iwl_channel_info *channel_info; /* channel info array */
@@ -1255,7 +1296,7 @@ struct iwl_priv {
1255 enum ieee80211_band scan_band; 1296 enum ieee80211_band scan_band;
1256 struct cfg80211_scan_request *scan_request; 1297 struct cfg80211_scan_request *scan_request;
1257 struct ieee80211_vif *scan_vif; 1298 struct ieee80211_vif *scan_vif;
1258 bool is_internal_short_scan; 1299 enum iwl_scan_type scan_type;
1259 u8 scan_tx_ant[IEEE80211_NUM_BANDS]; 1300 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1260 u8 mgmt_tx_ant; 1301 u8 mgmt_tx_ant;
1261 1302
@@ -1310,11 +1351,6 @@ struct iwl_priv {
1310 struct iwl_init_alive_resp card_alive_init; 1351 struct iwl_init_alive_resp card_alive_init;
1311 struct iwl_alive_resp card_alive; 1352 struct iwl_alive_resp card_alive;
1312 1353
1313 unsigned long last_blink_time;
1314 u8 last_blink_rate;
1315 u8 allow_blinking;
1316 u64 led_tpt;
1317
1318 u16 active_rate; 1354 u16 active_rate;
1319 1355
1320 u8 start_calib; 1356 u8 start_calib;
@@ -1463,6 +1499,21 @@ struct iwl_priv {
1463 struct iwl_bt_notif_statistics delta_statistics_bt; 1499 struct iwl_bt_notif_statistics delta_statistics_bt;
1464 struct iwl_bt_notif_statistics max_delta_bt; 1500 struct iwl_bt_notif_statistics max_delta_bt;
1465#endif 1501#endif
1502
1503 /* notification wait support */
1504 struct list_head notif_waits;
1505 spinlock_t notif_wait_lock;
1506 wait_queue_head_t notif_waitq;
1507
1508 /* remain-on-channel offload support */
1509 struct ieee80211_channel *hw_roc_channel;
1510 struct delayed_work hw_roc_work;
1511 enum nl80211_channel_type hw_roc_chantype;
1512 int hw_roc_duration;
1513
1514 struct sk_buff *offchan_tx_skb;
1515 int offchan_tx_timeout;
1516 struct ieee80211_channel *offchan_tx_chan;
1466 } _agn; 1517 } _agn;
1467#endif 1518#endif
1468 }; 1519 };
@@ -1472,7 +1523,6 @@ struct iwl_priv {
1472 u8 bt_status; 1523 u8 bt_status;
1473 u8 bt_traffic_load, last_bt_traffic_load; 1524 u8 bt_traffic_load, last_bt_traffic_load;
1474 bool bt_ch_announce; 1525 bool bt_ch_announce;
1475 bool bt_sco_active;
1476 bool bt_full_concurrent; 1526 bool bt_full_concurrent;
1477 bool bt_ant_couple_ok; 1527 bool bt_ant_couple_ok;
1478 __le32 kill_ack_mask; 1528 __le32 kill_ack_mask;
@@ -1547,6 +1597,10 @@ struct iwl_priv {
1547 bool hw_ready; 1597 bool hw_ready;
1548 1598
1549 struct iwl_event_log event_log; 1599 struct iwl_event_log event_log;
1600
1601 struct led_classdev led;
1602 unsigned long blink_on, blink_off;
1603 bool led_registered;
1550}; /*iwl_priv */ 1604}; /*iwl_priv */
1551 1605
1552static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1606static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 358cfd7e5af1..833194a2c639 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -222,7 +222,6 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
222 BUG_ON(offset >= priv->cfg->base_params->eeprom_size); 222 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
223 return &priv->eeprom[offset]; 223 return &priv->eeprom[offset];
224} 224}
225EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
226 225
227static int iwl_init_otp_access(struct iwl_priv *priv) 226static int iwl_init_otp_access(struct iwl_priv *priv)
228{ 227{
@@ -382,7 +381,6 @@ const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
382{ 381{
383 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset); 382 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
384} 383}
385EXPORT_SYMBOL(iwl_eeprom_query_addr);
386 384
387u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) 385u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
388{ 386{
@@ -390,7 +388,6 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
390 return 0; 388 return 0;
391 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); 389 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
392} 390}
393EXPORT_SYMBOL(iwl_eeprom_query16);
394 391
395/** 392/**
396 * iwl_eeprom_init - read EEPROM contents 393 * iwl_eeprom_init - read EEPROM contents
@@ -509,14 +506,12 @@ err:
509alloc_err: 506alloc_err:
510 return ret; 507 return ret;
511} 508}
512EXPORT_SYMBOL(iwl_eeprom_init);
513 509
514void iwl_eeprom_free(struct iwl_priv *priv) 510void iwl_eeprom_free(struct iwl_priv *priv)
515{ 511{
516 kfree(priv->eeprom); 512 kfree(priv->eeprom);
517 priv->eeprom = NULL; 513 priv->eeprom = NULL;
518} 514}
519EXPORT_SYMBOL(iwl_eeprom_free);
520 515
521static void iwl_init_band_reference(const struct iwl_priv *priv, 516static void iwl_init_band_reference(const struct iwl_priv *priv,
522 int eep_band, int *eeprom_ch_count, 517 int eep_band, int *eeprom_ch_count,
@@ -779,7 +774,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
779 774
780 return 0; 775 return 0;
781} 776}
782EXPORT_SYMBOL(iwl_init_channel_map);
783 777
784/* 778/*
785 * iwl_free_channel_map - undo allocations in iwl_init_channel_map 779 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
@@ -789,7 +783,6 @@ void iwl_free_channel_map(struct iwl_priv *priv)
789 kfree(priv->channel_info); 783 kfree(priv->channel_info);
790 priv->channel_count = 0; 784 priv->channel_count = 0;
791} 785}
792EXPORT_SYMBOL(iwl_free_channel_map);
793 786
794/** 787/**
795 * iwl_get_channel_info - Find driver's private channel info 788 * iwl_get_channel_info - Find driver's private channel info
@@ -818,4 +811,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
818 811
819 return NULL; 812 return NULL;
820} 813}
821EXPORT_SYMBOL(iwl_get_channel_info);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 9e6f31355eee..98aa8af01192 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -247,13 +247,26 @@ struct iwl_eeprom_enhanced_txpwr {
247#define EEPROM_6050_TX_POWER_VERSION (4) 247#define EEPROM_6050_TX_POWER_VERSION (4)
248#define EEPROM_6050_EEPROM_VERSION (0x532) 248#define EEPROM_6050_EEPROM_VERSION (0x532)
249 249
250/* 6x50g2 Specific */ 250/* 6150 Specific */
251#define EEPROM_6050G2_TX_POWER_VERSION (6) 251#define EEPROM_6150_TX_POWER_VERSION (6)
252#define EEPROM_6050G2_EEPROM_VERSION (0x553) 252#define EEPROM_6150_EEPROM_VERSION (0x553)
253
254/* 6x05 Specific */
255#define EEPROM_6005_TX_POWER_VERSION (6)
256#define EEPROM_6005_EEPROM_VERSION (0x709)
257
258/* 6x30 Specific */
259#define EEPROM_6030_TX_POWER_VERSION (6)
260#define EEPROM_6030_EEPROM_VERSION (0x709)
261
262/* 2x00 Specific */
263#define EEPROM_2000_TX_POWER_VERSION (6)
264#define EEPROM_2000_EEPROM_VERSION (0x805)
265
266/* 6x35 Specific */
267#define EEPROM_6035_TX_POWER_VERSION (6)
268#define EEPROM_6035_EEPROM_VERSION (0x753)
253 269
254/* 6x00g2 Specific */
255#define EEPROM_6000G2_TX_POWER_VERSION (6)
256#define EEPROM_6000G2_EEPROM_VERSION (0x709)
257 270
258/* OTP */ 271/* OTP */
259/* lower blocks contain EEPROM image and calibration data */ 272/* lower blocks contain EEPROM image and calibration data */
@@ -264,6 +277,7 @@ struct iwl_eeprom_enhanced_txpwr {
264#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ 277#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
265#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ 278#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
266#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ 279#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
280#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
267 281
268/* 2.4 GHz */ 282/* 2.4 GHz */
269extern const u8 iwl_eeprom_band_1[14]; 283extern const u8 iwl_eeprom_band_1[14];
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index c373b53babea..02499f684683 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -108,12 +108,12 @@ const char *get_cmd_string(u8 cmd)
108 IWL_CMD(REPLY_WIPAN_WEPKEY); 108 IWL_CMD(REPLY_WIPAN_WEPKEY);
109 IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); 109 IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
110 IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); 110 IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
111 IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
111 default: 112 default:
112 return "UNKNOWN"; 113 return "UNKNOWN";
113 114
114 } 115 }
115} 116}
116EXPORT_SYMBOL(get_cmd_string);
117 117
118#define HOST_COMPLETE_TIMEOUT (HZ / 2) 118#define HOST_COMPLETE_TIMEOUT (HZ / 2)
119 119
@@ -252,7 +252,6 @@ out:
252 mutex_unlock(&priv->sync_cmd_mutex); 252 mutex_unlock(&priv->sync_cmd_mutex);
253 return ret; 253 return ret;
254} 254}
255EXPORT_SYMBOL(iwl_send_cmd_sync);
256 255
257int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) 256int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
258{ 257{
@@ -261,7 +260,6 @@ int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
261 260
262 return iwl_send_cmd_sync(priv, cmd); 261 return iwl_send_cmd_sync(priv, cmd);
263} 262}
264EXPORT_SYMBOL(iwl_send_cmd);
265 263
266int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) 264int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
267{ 265{
@@ -273,7 +271,6 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
273 271
274 return iwl_send_cmd_sync(priv, &cmd); 272 return iwl_send_cmd_sync(priv, &cmd);
275} 273}
276EXPORT_SYMBOL(iwl_send_cmd_pdu);
277 274
278int iwl_send_cmd_pdu_async(struct iwl_priv *priv, 275int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
279 u8 id, u16 len, const void *data, 276 u8 id, u16 len, const void *data,
@@ -292,4 +289,3 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
292 289
293 return iwl_send_cmd_async(priv, &cmd); 290 return iwl_send_cmd_async(priv, &cmd);
294} 291}
295EXPORT_SYMBOL(iwl_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46ccdf406e8e..d7f2a0bb32c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -48,31 +48,19 @@ module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "0=system default, " 48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "1=On(RF On)/Off(RF Off), 2=blinking"); 49 "1=On(RF On)/Off(RF Off), 2=blinking");
50 50
51static const struct { 51static const struct ieee80211_tpt_blink iwl_blink[] = {
52 u16 tpt; /* Mb/s */ 52 { .throughput = 0 * 1024 - 1, .blink_time = 334 },
53 u8 on_time; 53 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
54 u8 off_time; 54 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
55} blink_tbl[] = 55 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
56{ 56 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
57 {300, 25, 25}, 57 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
58 {200, 40, 40}, 58 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
59 {100, 55, 55}, 59 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
60 {70, 65, 65}, 60 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
61 {50, 75, 75}, 61 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
62 {20, 85, 85},
63 {10, 95, 95},
64 {5, 110, 110},
65 {1, 130, 130},
66 {0, 167, 167},
67 /* SOLID_ON */
68 {-1, IWL_LED_SOLID, 0}
69}; 62};
70 63
71#define IWL_1MB_RATE (128 * 1024)
72#define IWL_LED_THRESHOLD (16)
73#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
74#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
75
76/* 64/*
77 * Adjust led blink rate to compensate on a MAC Clock difference on every HW 65 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
78 * Led blink rate analysis showed an average deviation of 0% on 3945, 66 * Led blink rate analysis showed an average deviation of 0% on 3945,
@@ -97,133 +85,102 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
97} 85}
98 86
99/* Set led pattern command */ 87/* Set led pattern command */
100static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx) 88static int iwl_led_cmd(struct iwl_priv *priv,
89 unsigned long on,
90 unsigned long off)
101{ 91{
102 struct iwl_led_cmd led_cmd = { 92 struct iwl_led_cmd led_cmd = {
103 .id = IWL_LED_LINK, 93 .id = IWL_LED_LINK,
104 .interval = IWL_DEF_LED_INTRVL 94 .interval = IWL_DEF_LED_INTRVL
105 }; 95 };
96 int ret;
97
98 if (!test_bit(STATUS_READY, &priv->status))
99 return -EBUSY;
106 100
107 BUG_ON(idx > IWL_MAX_BLINK_TBL); 101 if (priv->blink_on == on && priv->blink_off == off)
102 return 0;
108 103
109 IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n", 104 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
110 priv->cfg->base_params->led_compensation); 105 priv->cfg->base_params->led_compensation);
111 led_cmd.on = 106 led_cmd.on = iwl_blink_compensation(priv, on,
112 iwl_blink_compensation(priv, blink_tbl[idx].on_time,
113 priv->cfg->base_params->led_compensation); 107 priv->cfg->base_params->led_compensation);
114 led_cmd.off = 108 led_cmd.off = iwl_blink_compensation(priv, off,
115 iwl_blink_compensation(priv, blink_tbl[idx].off_time,
116 priv->cfg->base_params->led_compensation); 109 priv->cfg->base_params->led_compensation);
117 110
118 return priv->cfg->ops->led->cmd(priv, &led_cmd); 111 ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
112 if (!ret) {
113 priv->blink_on = on;
114 priv->blink_off = off;
115 }
116 return ret;
119} 117}
120 118
121int iwl_led_start(struct iwl_priv *priv) 119static void iwl_led_brightness_set(struct led_classdev *led_cdev,
120 enum led_brightness brightness)
122{ 121{
123 return priv->cfg->ops->led->on(priv); 122 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
124} 123 unsigned long on = 0;
125EXPORT_SYMBOL(iwl_led_start);
126 124
127int iwl_led_associate(struct iwl_priv *priv) 125 if (brightness > 0)
128{ 126 on = IWL_LED_SOLID;
129 IWL_DEBUG_LED(priv, "Associated\n");
130 if (priv->cfg->led_mode == IWL_LED_BLINK)
131 priv->allow_blinking = 1;
132 priv->last_blink_time = jiffies;
133 127
134 return 0; 128 iwl_led_cmd(priv, on, 0);
135} 129}
136EXPORT_SYMBOL(iwl_led_associate);
137 130
138int iwl_led_disassociate(struct iwl_priv *priv) 131static int iwl_led_blink_set(struct led_classdev *led_cdev,
132 unsigned long *delay_on,
133 unsigned long *delay_off)
139{ 134{
140 priv->allow_blinking = 0; 135 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
141 136
142 return 0; 137 return iwl_led_cmd(priv, *delay_on, *delay_off);
143} 138}
144EXPORT_SYMBOL(iwl_led_disassociate);
145 139
146/* 140void iwl_leds_init(struct iwl_priv *priv)
147 * calculate blink rate according to last second Tx/Rx activities
148 */
149static int iwl_get_blink_rate(struct iwl_priv *priv)
150{
151 int i;
152 /* count both tx and rx traffic to be able to
153 * handle traffic in either direction
154 */
155 u64 current_tpt = priv->tx_stats.data_bytes +
156 priv->rx_stats.data_bytes;
157 s64 tpt = current_tpt - priv->led_tpt;
158
159 if (tpt < 0) /* wraparound */
160 tpt = -tpt;
161
162 IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n",
163 (long long)tpt,
164 (unsigned long long)current_tpt);
165 priv->led_tpt = current_tpt;
166
167 if (!priv->allow_blinking)
168 i = IWL_MAX_BLINK_TBL;
169 else
170 for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
171 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
172 break;
173
174 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
175 return i;
176}
177
178/*
179 * this function called from handler. Since setting Led command can
180 * happen very frequent we postpone led command to be called from
181 * REPLY handler so we know ucode is up
182 */
183void iwl_leds_background(struct iwl_priv *priv)
184{ 141{
185 u8 blink_idx; 142 int mode = led_mode;
186 143 int ret;
187 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 144
188 priv->last_blink_time = 0; 145 if (mode == IWL_LED_DEFAULT)
189 return; 146 mode = priv->cfg->led_mode;
190 } 147
191 if (iwl_is_rfkill(priv)) { 148 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
192 priv->last_blink_time = 0; 149 wiphy_name(priv->hw->wiphy));
193 return; 150 priv->led.brightness_set = iwl_led_brightness_set;
151 priv->led.blink_set = iwl_led_blink_set;
152 priv->led.max_brightness = 1;
153
154 switch (mode) {
155 case IWL_LED_DEFAULT:
156 WARN_ON(1);
157 break;
158 case IWL_LED_BLINK:
159 priv->led.default_trigger =
160 ieee80211_create_tpt_led_trigger(priv->hw,
161 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
162 iwl_blink, ARRAY_SIZE(iwl_blink));
163 break;
164 case IWL_LED_RF_STATE:
165 priv->led.default_trigger =
166 ieee80211_get_radio_led_name(priv->hw);
167 break;
194 } 168 }
195 169
196 if (!priv->allow_blinking) { 170 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
197 priv->last_blink_time = 0; 171 if (ret) {
198 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) { 172 kfree(priv->led.name);
199 priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
200 iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
201 }
202 return; 173 return;
203 } 174 }
204 if (!priv->last_blink_time ||
205 !time_after(jiffies, priv->last_blink_time +
206 msecs_to_jiffies(1000)))
207 return;
208
209 blink_idx = iwl_get_blink_rate(priv);
210
211 /* call only if blink rate change */
212 if (blink_idx != priv->last_blink_rate)
213 iwl_led_pattern(priv, blink_idx);
214 175
215 priv->last_blink_time = jiffies; 176 priv->led_registered = true;
216 priv->last_blink_rate = blink_idx;
217} 177}
218EXPORT_SYMBOL(iwl_leds_background);
219 178
220void iwl_leds_init(struct iwl_priv *priv) 179void iwl_leds_exit(struct iwl_priv *priv)
221{ 180{
222 priv->last_blink_rate = 0; 181 if (!priv->led_registered)
223 priv->last_blink_time = 0; 182 return;
224 priv->allow_blinking = 0; 183
225 if (led_mode != IWL_LED_DEFAULT && 184 led_classdev_unregister(&priv->led);
226 led_mode != priv->cfg->led_mode) 185 kfree(priv->led.name);
227 priv->cfg->led_mode = led_mode;
228} 186}
229EXPORT_SYMBOL(iwl_leds_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 9079b33486ef..101eef12b3bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -31,23 +31,14 @@
31struct iwl_priv; 31struct iwl_priv;
32 32
33#define IWL_LED_SOLID 11 33#define IWL_LED_SOLID 11
34#define IWL_LED_NAME_LEN 31
35#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) 34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
36 35
37#define IWL_LED_ACTIVITY (0<<1) 36#define IWL_LED_ACTIVITY (0<<1)
38#define IWL_LED_LINK (1<<1) 37#define IWL_LED_LINK (1<<1)
39 38
40enum led_type {
41 IWL_LED_TRG_TX,
42 IWL_LED_TRG_RX,
43 IWL_LED_TRG_ASSOC,
44 IWL_LED_TRG_RADIO,
45 IWL_LED_TRG_MAX,
46};
47
48/* 39/*
49 * LED mode 40 * LED mode
50 * IWL_LED_DEFAULT: use system default 41 * IWL_LED_DEFAULT: use device default
51 * IWL_LED_RF_STATE: turn LED on/off based on RF state 42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
52 * LED ON = RF ON 43 * LED ON = RF ON
53 * LED OFF = RF OFF 44 * LED OFF = RF OFF
@@ -60,9 +51,6 @@ enum iwl_led_mode {
60}; 51};
61 52
62void iwl_leds_init(struct iwl_priv *priv); 53void iwl_leds_init(struct iwl_priv *priv);
63void iwl_leds_background(struct iwl_priv *priv); 54void iwl_leds_exit(struct iwl_priv *priv);
64int iwl_led_start(struct iwl_priv *priv);
65int iwl_led_associate(struct iwl_priv *priv);
66int iwl_led_disassociate(struct iwl_priv *priv);
67 55
68#endif /* __iwl_leds_h__ */ 56#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
deleted file mode 100644
index bb1a742a98a0..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ /dev/null
@@ -1,662 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-helpers.h"
35#include "iwl-legacy.h"
36
37static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
38{
39 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
40 return;
41
42 if (!ctx->is_active)
43 return;
44
45 ctx->qos_data.def_qos_parm.qos_flags = 0;
46
47 if (ctx->qos_data.qos_active)
48 ctx->qos_data.def_qos_parm.qos_flags |=
49 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
50
51 if (ctx->ht.enabled)
52 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
53
54 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
55 ctx->qos_data.qos_active,
56 ctx->qos_data.def_qos_parm.qos_flags);
57
58 iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
59 sizeof(struct iwl_qosparam_cmd),
60 &ctx->qos_data.def_qos_parm, NULL);
61}
62
63/**
64 * iwl_legacy_mac_config - mac80211 config callback
65 */
66int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
67{
68 struct iwl_priv *priv = hw->priv;
69 const struct iwl_channel_info *ch_info;
70 struct ieee80211_conf *conf = &hw->conf;
71 struct ieee80211_channel *channel = conf->channel;
72 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
73 struct iwl_rxon_context *ctx;
74 unsigned long flags = 0;
75 int ret = 0;
76 u16 ch;
77 int scan_active = 0;
78 bool ht_changed[NUM_IWL_RXON_CTX] = {};
79
80 if (WARN_ON(!priv->cfg->ops->legacy))
81 return -EOPNOTSUPP;
82
83 mutex_lock(&priv->mutex);
84
85 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
86 channel->hw_value, changed);
87
88 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
89 test_bit(STATUS_SCANNING, &priv->status))) {
90 scan_active = 1;
91 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
92 }
93
94 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
95 IEEE80211_CONF_CHANGE_CHANNEL)) {
96 /* mac80211 uses static for non-HT which is what we want */
97 priv->current_ht_config.smps = conf->smps_mode;
98
99 /*
100 * Recalculate chain counts.
101 *
102 * If monitor mode is enabled then mac80211 will
103 * set up the SM PS mode to OFF if an HT channel is
104 * configured.
105 */
106 if (priv->cfg->ops->hcmd->set_rxon_chain)
107 for_each_context(priv, ctx)
108 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
109 }
110
111 /* during scanning mac80211 will delay channel setting until
112 * scan finish with changed = 0
113 */
114 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
115 if (scan_active)
116 goto set_ch_out;
117
118 ch = channel->hw_value;
119 ch_info = iwl_get_channel_info(priv, channel->band, ch);
120 if (!is_channel_valid(ch_info)) {
121 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
122 ret = -EINVAL;
123 goto set_ch_out;
124 }
125
126 spin_lock_irqsave(&priv->lock, flags);
127
128 for_each_context(priv, ctx) {
129 /* Configure HT40 channels */
130 if (ctx->ht.enabled != conf_is_ht(conf)) {
131 ctx->ht.enabled = conf_is_ht(conf);
132 ht_changed[ctx->ctxid] = true;
133 }
134 if (ctx->ht.enabled) {
135 if (conf_is_ht40_minus(conf)) {
136 ctx->ht.extension_chan_offset =
137 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
138 ctx->ht.is_40mhz = true;
139 } else if (conf_is_ht40_plus(conf)) {
140 ctx->ht.extension_chan_offset =
141 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
142 ctx->ht.is_40mhz = true;
143 } else {
144 ctx->ht.extension_chan_offset =
145 IEEE80211_HT_PARAM_CHA_SEC_NONE;
146 ctx->ht.is_40mhz = false;
147 }
148 } else
149 ctx->ht.is_40mhz = false;
150
151 /*
152 * Default to no protection. Protection mode will
153 * later be set from BSS config in iwl_ht_conf
154 */
155 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
156
157 /* if we are switching from ht to 2.4 clear flags
158 * from any ht related info since 2.4 does not
159 * support ht */
160 if ((le16_to_cpu(ctx->staging.channel) != ch))
161 ctx->staging.flags = 0;
162
163 iwl_set_rxon_channel(priv, channel, ctx);
164 iwl_set_rxon_ht(priv, ht_conf);
165
166 iwl_set_flags_for_band(priv, ctx, channel->band,
167 ctx->vif);
168 }
169
170 spin_unlock_irqrestore(&priv->lock, flags);
171
172 if (priv->cfg->ops->legacy->update_bcast_stations)
173 ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
174
175 set_ch_out:
176 /* The list of supported rates and rate mask can be different
177 * for each band; since the band may have changed, reset
178 * the rate mask to what mac80211 lists */
179 iwl_set_rate(priv);
180 }
181
182 if (changed & (IEEE80211_CONF_CHANGE_PS |
183 IEEE80211_CONF_CHANGE_IDLE)) {
184 ret = iwl_power_update_mode(priv, false);
185 if (ret)
186 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
187 }
188
189 if (changed & IEEE80211_CONF_CHANGE_POWER) {
190 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
191 priv->tx_power_user_lmt, conf->power_level);
192
193 iwl_set_tx_power(priv, conf->power_level, false);
194 }
195
196 if (!iwl_is_ready(priv)) {
197 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
198 goto out;
199 }
200
201 if (scan_active)
202 goto out;
203
204 for_each_context(priv, ctx) {
205 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
206 iwlcore_commit_rxon(priv, ctx);
207 else
208 IWL_DEBUG_INFO(priv,
209 "Not re-sending same RXON configuration.\n");
210 if (ht_changed[ctx->ctxid])
211 iwl_update_qos(priv, ctx);
212 }
213
214out:
215 IWL_DEBUG_MAC80211(priv, "leave\n");
216 mutex_unlock(&priv->mutex);
217 return ret;
218}
219EXPORT_SYMBOL(iwl_legacy_mac_config);
220
221void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
222{
223 struct iwl_priv *priv = hw->priv;
224 unsigned long flags;
225 /* IBSS can only be the IWL_RXON_CTX_BSS context */
226 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
227
228 if (WARN_ON(!priv->cfg->ops->legacy))
229 return;
230
231 mutex_lock(&priv->mutex);
232 IWL_DEBUG_MAC80211(priv, "enter\n");
233
234 spin_lock_irqsave(&priv->lock, flags);
235 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
236 spin_unlock_irqrestore(&priv->lock, flags);
237
238 spin_lock_irqsave(&priv->lock, flags);
239
240 /* new association get rid of ibss beacon skb */
241 if (priv->beacon_skb)
242 dev_kfree_skb(priv->beacon_skb);
243
244 priv->beacon_skb = NULL;
245
246 priv->timestamp = 0;
247
248 spin_unlock_irqrestore(&priv->lock, flags);
249
250 iwl_scan_cancel_timeout(priv, 100);
251 if (!iwl_is_ready_rf(priv)) {
252 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
253 mutex_unlock(&priv->mutex);
254 return;
255 }
256
257 /* we are restarting association process
258 * clear RXON_FILTER_ASSOC_MSK bit
259 */
260 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
261 iwlcore_commit_rxon(priv, ctx);
262
263 iwl_set_rate(priv);
264
265 mutex_unlock(&priv->mutex);
266
267 IWL_DEBUG_MAC80211(priv, "leave\n");
268}
269EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
270
271static void iwl_ht_conf(struct iwl_priv *priv,
272 struct ieee80211_vif *vif)
273{
274 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
275 struct ieee80211_sta *sta;
276 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
277 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
278
279 IWL_DEBUG_ASSOC(priv, "enter:\n");
280
281 if (!ctx->ht.enabled)
282 return;
283
284 ctx->ht.protection =
285 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
286 ctx->ht.non_gf_sta_present =
287 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
288
289 ht_conf->single_chain_sufficient = false;
290
291 switch (vif->type) {
292 case NL80211_IFTYPE_STATION:
293 rcu_read_lock();
294 sta = ieee80211_find_sta(vif, bss_conf->bssid);
295 if (sta) {
296 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
297 int maxstreams;
298
299 maxstreams = (ht_cap->mcs.tx_params &
300 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
301 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
302 maxstreams += 1;
303
304 if ((ht_cap->mcs.rx_mask[1] == 0) &&
305 (ht_cap->mcs.rx_mask[2] == 0))
306 ht_conf->single_chain_sufficient = true;
307 if (maxstreams <= 1)
308 ht_conf->single_chain_sufficient = true;
309 } else {
310 /*
311 * If at all, this can only happen through a race
312 * when the AP disconnects us while we're still
313 * setting up the connection, in that case mac80211
314 * will soon tell us about that.
315 */
316 ht_conf->single_chain_sufficient = true;
317 }
318 rcu_read_unlock();
319 break;
320 case NL80211_IFTYPE_ADHOC:
321 ht_conf->single_chain_sufficient = true;
322 break;
323 default:
324 break;
325 }
326
327 IWL_DEBUG_ASSOC(priv, "leave\n");
328}
329
330static inline void iwl_set_no_assoc(struct iwl_priv *priv,
331 struct ieee80211_vif *vif)
332{
333 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
334
335 iwl_led_disassociate(priv);
336 /*
337 * inform the ucode that there is no longer an
338 * association and that no more packets should be
339 * sent
340 */
341 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
342 ctx->staging.assoc_id = 0;
343 iwlcore_commit_rxon(priv, ctx);
344}
345
346static void iwlcore_beacon_update(struct ieee80211_hw *hw,
347 struct ieee80211_vif *vif)
348{
349 struct iwl_priv *priv = hw->priv;
350 unsigned long flags;
351 __le64 timestamp;
352 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
353
354 if (!skb)
355 return;
356
357 IWL_DEBUG_MAC80211(priv, "enter\n");
358
359 lockdep_assert_held(&priv->mutex);
360
361 if (!priv->beacon_ctx) {
362 IWL_ERR(priv, "update beacon but no beacon context!\n");
363 dev_kfree_skb(skb);
364 return;
365 }
366
367 spin_lock_irqsave(&priv->lock, flags);
368
369 if (priv->beacon_skb)
370 dev_kfree_skb(priv->beacon_skb);
371
372 priv->beacon_skb = skb;
373
374 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
375 priv->timestamp = le64_to_cpu(timestamp);
376
377 IWL_DEBUG_MAC80211(priv, "leave\n");
378 spin_unlock_irqrestore(&priv->lock, flags);
379
380 if (!iwl_is_ready_rf(priv)) {
381 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
382 return;
383 }
384
385 priv->cfg->ops->legacy->post_associate(priv);
386}
387
/*
 * iwl_legacy_mac_bss_info_changed - mac80211 bss_info_changed handler
 * shared by the legacy devices.
 *
 * Applies each BSS_CHANGED_* bit in @changes to the staging RXON of the
 * vif's context and, where needed, pushes the result to the device or
 * invokes the per-device legacy ops (post_associate, config_ap,
 * manage_ibss_station).  The whole body runs under priv->mutex; the QoS
 * update additionally takes priv->lock.
 */
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int ret;

	/* Only devices that provide the legacy ops may reach this path. */
	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	/* Nothing below can be done until the uCode is alive. */
	if (!iwl_is_alive(priv))
		return;

	mutex_lock(&priv->mutex);

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		/* qos_data is shared with the interrupt path, hence the
		 * irqsave spinlock around the update. */
		spin_lock_irqsave(&priv->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		iwl_update_qos(priv, ctx);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			priv->beacon_ctx = ctx;
		else
			priv->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
		/* Replace the cached beacon with a fresh one from mac80211. */
		dev_kfree_skb(priv->beacon_skb);
		priv->beacon_skb = ieee80211_beacon_get(hw, vif);
	}

	if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
		iwl_send_rxon_timing(priv, ctx);

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			/* Disassociated: drop the ASSOC filter bit so the
			 * staging RXON reflects the unassociated state. */
			ctx->staging.filter_flags &=
				~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
		iwlcore_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies outside the 5 GHz band. */
		if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			ctx->staging.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_ht_conf(priv, vif);

		/* set_rxon_chain is optional in the hcmd ops table. */
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;

			iwl_led_associate(priv);

			/* post_associate would touch the radio; skip it
			 * while rfkill is asserted. */
			if (!iwl_is_rfkill(priv))
				priv->cfg->ops->legacy->post_associate(priv);
		} else
			iwl_set_no_assoc(priv, vif);
	}

	/* Any change made while associated must be pushed to the device
	 * so active_rxon stays in sync with the staging copy. */
	if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_send_rxon_assoc(priv, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
			       &ctx->staging,
			       sizeof(struct iwl_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
			iwl_led_associate(priv);
			priv->cfg->ops->legacy->config_ap(priv);
		} else
			iwl_set_no_assoc(priv, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
							bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
568
/*
 * iwl_isr_legacy - shared hardware interrupt handler for legacy devices
 *
 * Masks further interrupts, reads the pending interrupt sources from
 * CSR_INT and CSR_FH_INT_STATUS, and defers the actual servicing to the
 * driver's irq tasklet.  Interrupts are re-enabled by the tasklet, or
 * directly here when there was nothing to service.  Returns IRQ_NONE
 * when the interrupt was not ours (the line may be shared).
 */
irqreturn_t iwl_isr_legacy(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* All-ones (or the 0xa5a5a5a0 pattern) means the bus returned
	 * garbage - the device was likely removed or has died. */
	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	/* Mask off CSR_INT_BIT_SCD before deciding whether any work is
	 * pending; that bit is not serviced by the tasklet. */
	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_isr_legacy);
629
630/*
631 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
632 * function.
633 */
634void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
635 struct ieee80211_tx_info *info,
636 __le16 fc, __le32 *tx_flags)
637{
638 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
639 *tx_flags |= TX_CMD_FLG_RTS_MSK;
640 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
641 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
642
643 if (!ieee80211_is_mgmt(fc))
644 return;
645
646 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
647 case cpu_to_le16(IEEE80211_STYPE_AUTH):
648 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
649 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
650 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
651 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
652 *tx_flags |= TX_CMD_FLG_CTS_MSK;
653 break;
654 }
655 } else if (info->control.rates[0].flags &
656 IEEE80211_TX_RC_USE_CTS_PROTECT) {
657 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
658 *tx_flags |= TX_CMD_FLG_CTS_MSK;
659 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
660 }
661}
662EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 1eec18d909d8..576795e2c75b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -226,8 +226,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
226 else 226 else
227 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 227 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
228 228
229 if (priv->cfg->bt_params && 229 if (iwl_advanced_bt_coexist(priv)) {
230 priv->cfg->bt_params->advanced_bt_coexist) {
231 if (!priv->cfg->bt_params->bt_sco_disable) 230 if (!priv->cfg->bt_params->bt_sco_disable)
232 cmd->flags |= IWL_POWER_BT_SCO_ENA; 231 cmd->flags |= IWL_POWER_BT_SCO_ENA;
233 else 232 else
@@ -313,8 +312,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
313 else 312 else
314 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 313 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
315 314
316 if (priv->cfg->bt_params && 315 if (iwl_advanced_bt_coexist(priv)) {
317 priv->cfg->bt_params->advanced_bt_coexist) {
318 if (!priv->cfg->bt_params->bt_sco_disable) 316 if (!priv->cfg->bt_params->bt_sco_disable)
319 cmd->flags |= IWL_POWER_BT_SCO_ENA; 317 cmd->flags |= IWL_POWER_BT_SCO_ENA;
320 else 318 else
@@ -358,8 +356,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
358 356
359 if (priv->cfg->base_params->broken_powersave) 357 if (priv->cfg->base_params->broken_powersave)
360 iwl_power_sleep_cam_cmd(priv, cmd); 358 iwl_power_sleep_cam_cmd(priv, cmd);
361 else if (priv->cfg->base_params->supports_idle && 359 else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
362 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
363 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); 360 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
364 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection && 361 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
365 priv->cfg->ops->lib->tt_ops.tt_power_mode && 362 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
@@ -428,7 +425,6 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
428 425
429 return ret; 426 return ret;
430} 427}
431EXPORT_SYMBOL(iwl_power_set_mode);
432 428
433int iwl_power_update_mode(struct iwl_priv *priv, bool force) 429int iwl_power_update_mode(struct iwl_priv *priv, bool force)
434{ 430{
@@ -437,7 +433,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
437 iwl_power_build_cmd(priv, &cmd); 433 iwl_power_build_cmd(priv, &cmd);
438 return iwl_power_set_mode(priv, &cmd, force); 434 return iwl_power_set_mode(priv, &cmd, force);
439} 435}
440EXPORT_SYMBOL(iwl_power_update_mode);
441 436
442/* initialize to default */ 437/* initialize to default */
443void iwl_power_initialize(struct iwl_priv *priv) 438void iwl_power_initialize(struct iwl_priv *priv)
@@ -451,4 +446,3 @@ void iwl_power_initialize(struct iwl_priv *priv)
451 memset(&priv->power_data.sleep_cmd, 0, 446 memset(&priv->power_data.sleep_cmd, 0,
452 sizeof(priv->power_data.sleep_cmd)); 447 sizeof(priv->power_data.sleep_cmd));
453} 448}
454EXPORT_SYMBOL(iwl_power_initialize);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 87a6fd84d4d2..6f9a2fa04763 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/sched.h>
32#include <net/mac80211.h> 33#include <net/mac80211.h>
33#include <asm/unaligned.h> 34#include <asm/unaligned.h>
34#include "iwl-eeprom.h" 35#include "iwl-eeprom.h"
@@ -37,7 +38,15 @@
37#include "iwl-sta.h" 38#include "iwl-sta.h"
38#include "iwl-io.h" 39#include "iwl-io.h"
39#include "iwl-helpers.h" 40#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/ 41#include "iwl-agn-calib.h"
42#include "iwl-agn.h"
43
44/******************************************************************************
45 *
46 * RX path functions
47 *
48 ******************************************************************************/
49
41/* 50/*
42 * Rx theory of operation 51 * Rx theory of operation
43 * 52 *
@@ -118,7 +127,6 @@ int iwl_rx_queue_space(const struct iwl_rx_queue *q)
118 s = 0; 127 s = 0;
119 return s; 128 return s;
120} 129}
121EXPORT_SYMBOL(iwl_rx_queue_space);
122 130
123/** 131/**
124 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue 132 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
@@ -170,7 +178,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
170 exit_unlock: 178 exit_unlock:
171 spin_unlock_irqrestore(&q->lock, flags); 179 spin_unlock_irqrestore(&q->lock, flags);
172} 180}
173EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
174 181
175int iwl_rx_queue_alloc(struct iwl_priv *priv) 182int iwl_rx_queue_alloc(struct iwl_priv *priv)
176{ 183{
@@ -211,10 +218,105 @@ err_rb:
211err_bd: 218err_bd:
212 return -ENOMEM; 219 return -ENOMEM;
213} 220}
214EXPORT_SYMBOL(iwl_rx_queue_alloc); 221
222/******************************************************************************
223 *
224 * Generic RX handler implementations
225 *
226 ******************************************************************************/
227
228static void iwl_rx_reply_alive(struct iwl_priv *priv,
229 struct iwl_rx_mem_buffer *rxb)
230{
231 struct iwl_rx_packet *pkt = rxb_addr(rxb);
232 struct iwl_alive_resp *palive;
233 struct delayed_work *pwork;
234
235 palive = &pkt->u.alive_frame;
236
237 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
238 "0x%01X 0x%01X\n",
239 palive->is_valid, palive->ver_type,
240 palive->ver_subtype);
241
242 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
243 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
244 memcpy(&priv->card_alive_init,
245 &pkt->u.alive_frame,
246 sizeof(struct iwl_init_alive_resp));
247 pwork = &priv->init_alive_start;
248 } else {
249 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
250 memcpy(&priv->card_alive, &pkt->u.alive_frame,
251 sizeof(struct iwl_alive_resp));
252 pwork = &priv->alive_start;
253 }
254
255 /* We delay the ALIVE response by 5ms to
256 * give the HW RF Kill time to activate... */
257 if (palive->is_valid == UCODE_VALID_OK)
258 queue_delayed_work(priv->workqueue, pwork,
259 msecs_to_jiffies(5));
260 else {
261 IWL_WARN(priv, "%s uCode did not respond OK.\n",
262 (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
263 "init" : "runtime");
264 /*
265 * If fail to load init uCode,
266 * let's try to load the init uCode again.
267 * We should not get into this situation, but if it
268 * does happen, we should not move on and loading "runtime"
269 * without proper calibrate the device.
270 */
271 if (palive->ver_subtype == INITIALIZE_SUBTYPE)
272 priv->ucode_type = UCODE_NONE;
273 queue_work(priv->workqueue, &priv->restart);
274 }
275}
276
277static void iwl_rx_reply_error(struct iwl_priv *priv,
278 struct iwl_rx_mem_buffer *rxb)
279{
280 struct iwl_rx_packet *pkt = rxb_addr(rxb);
281
282 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
283 "seq 0x%04X ser 0x%08X\n",
284 le32_to_cpu(pkt->u.err_resp.error_type),
285 get_cmd_string(pkt->u.err_resp.cmd_id),
286 pkt->u.err_resp.cmd_id,
287 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
288 le32_to_cpu(pkt->u.err_resp.error_info));
289}
290
291static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
292{
293 struct iwl_rx_packet *pkt = rxb_addr(rxb);
294 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
295 /*
296 * MULTI-FIXME
297 * See iwl_mac_channel_switch.
298 */
299 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
300 struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
301
302 if (priv->switch_rxon.switch_in_progress) {
303 if (!le32_to_cpu(csa->status) &&
304 (csa->channel == priv->switch_rxon.channel)) {
305 rxon->channel = csa->channel;
306 ctx->staging.channel = csa->channel;
307 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
308 le16_to_cpu(csa->channel));
309 iwl_chswitch_done(priv, true);
310 } else {
311 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
312 le16_to_cpu(csa->channel));
313 iwl_chswitch_done(priv, false);
314 }
315 }
316}
215 317
216 318
217void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 319static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
218 struct iwl_rx_mem_buffer *rxb) 320 struct iwl_rx_mem_buffer *rxb)
219{ 321{
220 struct iwl_rx_packet *pkt = rxb_addr(rxb); 322 struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -229,48 +331,494 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
229 memcpy(&priv->measure_report, report, sizeof(*report)); 331 memcpy(&priv->measure_report, report, sizeof(*report));
230 priv->measurement_status |= MEASUREMENT_READY; 332 priv->measurement_status |= MEASUREMENT_READY;
231} 333}
232EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
233 334
234void iwl_recover_from_statistics(struct iwl_priv *priv, 335static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
235 struct iwl_rx_packet *pkt) 336 struct iwl_rx_mem_buffer *rxb)
337{
338#ifdef CONFIG_IWLWIFI_DEBUG
339 struct iwl_rx_packet *pkt = rxb_addr(rxb);
340 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
341 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
342 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
343#endif
344}
345
346static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
347 struct iwl_rx_mem_buffer *rxb)
348{
349 struct iwl_rx_packet *pkt = rxb_addr(rxb);
350 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
351 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
352 "notification for %s:\n", len,
353 get_cmd_string(pkt->hdr.cmd));
354 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
355}
356
357static void iwl_rx_beacon_notif(struct iwl_priv *priv,
358 struct iwl_rx_mem_buffer *rxb)
359{
360 struct iwl_rx_packet *pkt = rxb_addr(rxb);
361 struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
362#ifdef CONFIG_IWLWIFI_DEBUG
363 u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
364 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
365
366 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
367 "tsf:0x%.8x%.8x rate:%d\n",
368 status & TX_STATUS_MSK,
369 beacon->beacon_notify_hdr.failure_frame,
370 le32_to_cpu(beacon->ibss_mgr_status),
371 le32_to_cpu(beacon->high_tsf),
372 le32_to_cpu(beacon->low_tsf), rate);
373#endif
374
375 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
376
377 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
378 queue_work(priv->workqueue, &priv->beacon_update);
379}
380
381/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
382#define ACK_CNT_RATIO (50)
383#define BA_TIMEOUT_CNT (5)
384#define BA_TIMEOUT_MAX (16)
385
386/**
387 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
388 *
389 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
390 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
391 * operation state.
392 */
393static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
394{
395 int actual_delta, expected_delta, ba_timeout_delta;
396 struct statistics_tx *cur, *old;
397
398 if (priv->_agn.agg_tids_count)
399 return true;
400
401 if (iwl_bt_statistics(priv)) {
402 cur = &pkt->u.stats_bt.tx;
403 old = &priv->_agn.statistics_bt.tx;
404 } else {
405 cur = &pkt->u.stats.tx;
406 old = &priv->_agn.statistics.tx;
407 }
408
409 actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
410 le32_to_cpu(old->actual_ack_cnt);
411 expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
412 le32_to_cpu(old->expected_ack_cnt);
413
414 /* Values should not be negative, but we do not trust the firmware */
415 if (actual_delta <= 0 || expected_delta <= 0)
416 return true;
417
418 ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
419 le32_to_cpu(old->agg.ba_timeout);
420
421 if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
422 ba_timeout_delta > BA_TIMEOUT_CNT) {
423 IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
424 actual_delta, expected_delta, ba_timeout_delta);
425
426#ifdef CONFIG_IWLWIFI_DEBUGFS
427 /*
428 * This is ifdef'ed on DEBUGFS because otherwise the
429 * statistics aren't available. If DEBUGFS is set but
430 * DEBUG is not, these will just compile out.
431 */
432 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
433 priv->_agn.delta_statistics.tx.rx_detected_cnt);
434 IWL_DEBUG_RADIO(priv,
435 "ack_or_ba_timeout_collision delta %d\n",
436 priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
437#endif
438
439 if (ba_timeout_delta >= BA_TIMEOUT_MAX)
440 return false;
441 }
442
443 return true;
444}
445
446/**
447 * iwl_good_plcp_health - checks for plcp error.
448 *
449 * When the plcp error is exceeding the thresholds, reset the radio
450 * to improve the throughput.
451 */
452static bool iwl_good_plcp_health(struct iwl_priv *priv,
453 struct iwl_rx_packet *pkt, unsigned int msecs)
236{ 454{
455 int delta;
456 int threshold = priv->cfg->base_params->plcp_delta_threshold;
457
458 if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
459 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
460 return true;
461 }
462
463 if (iwl_bt_statistics(priv)) {
464 struct statistics_rx_bt *cur, *old;
465
466 cur = &pkt->u.stats_bt.rx;
467 old = &priv->_agn.statistics_bt.rx;
468
469 delta = le32_to_cpu(cur->ofdm.plcp_err) -
470 le32_to_cpu(old->ofdm.plcp_err) +
471 le32_to_cpu(cur->ofdm_ht.plcp_err) -
472 le32_to_cpu(old->ofdm_ht.plcp_err);
473 } else {
474 struct statistics_rx *cur, *old;
475
476 cur = &pkt->u.stats.rx;
477 old = &priv->_agn.statistics.rx;
478
479 delta = le32_to_cpu(cur->ofdm.plcp_err) -
480 le32_to_cpu(old->ofdm.plcp_err) +
481 le32_to_cpu(cur->ofdm_ht.plcp_err) -
482 le32_to_cpu(old->ofdm_ht.plcp_err);
483 }
484
485 /* Can be negative if firmware reseted statistics */
486 if (delta <= 0)
487 return true;
488
489 if ((delta * 100 / msecs) > threshold) {
490 IWL_DEBUG_RADIO(priv,
491 "plcp health threshold %u delta %d msecs %u\n",
492 threshold, delta, msecs);
493 return false;
494 }
495
496 return true;
497}
498
499static void iwl_recover_from_statistics(struct iwl_priv *priv,
500 struct iwl_rx_packet *pkt)
501{
502 const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
503 unsigned int msecs;
504 unsigned long stamp;
505
237 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 506 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
238 return; 507 return;
239 if (iwl_is_any_associated(priv)) { 508
240 if (priv->cfg->ops->lib->check_ack_health) { 509 stamp = jiffies;
241 if (!priv->cfg->ops->lib->check_ack_health( 510 msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
242 priv, pkt)) { 511
243 /* 512 /* Only gather statistics and update time stamp when not associated */
244 * low ack count detected 513 if (!iwl_is_any_associated(priv))
245 * restart Firmware 514 goto out;
246 */ 515
247 IWL_ERR(priv, "low ack count detected, " 516 /* Do not check/recover when do not have enough statistics data */
248 "restart firmware\n"); 517 if (msecs < 99)
249 if (!iwl_force_reset(priv, IWL_FW_RESET, false)) 518 return;
250 return; 519
251 } 520 if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
521 IWL_ERR(priv, "low ack count detected, restart firmware\n");
522 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
523 return;
524 }
525
526 if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs))
527 iwl_force_reset(priv, IWL_RF_RESET, false);
528
529out:
530 if (iwl_bt_statistics(priv))
531 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
532 sizeof(priv->_agn.statistics_bt));
533 else
534 memcpy(&priv->_agn.statistics, &pkt->u.stats,
535 sizeof(priv->_agn.statistics));
536
537 priv->rx_statistics_jiffies = stamp;
538}
539
540/* Calculate noise level, based on measurements during network silence just
541 * before arriving beacon. This measurement can be done only if we know
542 * exactly when to expect beacons, therefore only when we're associated. */
543static void iwl_rx_calc_noise(struct iwl_priv *priv)
544{
545 struct statistics_rx_non_phy *rx_info;
546 int num_active_rx = 0;
547 int total_silence = 0;
548 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
549 int last_rx_noise;
550
551 if (iwl_bt_statistics(priv))
552 rx_info = &(priv->_agn.statistics_bt.rx.general.common);
553 else
554 rx_info = &(priv->_agn.statistics.rx.general);
555 bcn_silence_a =
556 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
557 bcn_silence_b =
558 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
559 bcn_silence_c =
560 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
561
562 if (bcn_silence_a) {
563 total_silence += bcn_silence_a;
564 num_active_rx++;
565 }
566 if (bcn_silence_b) {
567 total_silence += bcn_silence_b;
568 num_active_rx++;
569 }
570 if (bcn_silence_c) {
571 total_silence += bcn_silence_c;
572 num_active_rx++;
573 }
574
575 /* Average among active antennas */
576 if (num_active_rx)
577 last_rx_noise = (total_silence / num_active_rx) - 107;
578 else
579 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
580
581 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
582 bcn_silence_a, bcn_silence_b, bcn_silence_c,
583 last_rx_noise);
584}
585
586/*
587 * based on the assumption of all statistics counter are in DWORD
588 * FIXME: This function is for debugging, do not deal with
589 * the case of counters roll-over.
590 */
591static void iwl_accumulative_statistics(struct iwl_priv *priv,
592 __le32 *stats)
593{
594#ifdef CONFIG_IWLWIFI_DEBUGFS
595 int i, size;
596 __le32 *prev_stats;
597 u32 *accum_stats;
598 u32 *delta, *max_delta;
599 struct statistics_general_common *general, *accum_general;
600 struct statistics_tx *tx, *accum_tx;
601
602 if (iwl_bt_statistics(priv)) {
603 prev_stats = (__le32 *)&priv->_agn.statistics_bt;
604 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
605 size = sizeof(struct iwl_bt_notif_statistics);
606 general = &priv->_agn.statistics_bt.general.common;
607 accum_general = &priv->_agn.accum_statistics_bt.general.common;
608 tx = &priv->_agn.statistics_bt.tx;
609 accum_tx = &priv->_agn.accum_statistics_bt.tx;
610 delta = (u32 *)&priv->_agn.delta_statistics_bt;
611 max_delta = (u32 *)&priv->_agn.max_delta_bt;
612 } else {
613 prev_stats = (__le32 *)&priv->_agn.statistics;
614 accum_stats = (u32 *)&priv->_agn.accum_statistics;
615 size = sizeof(struct iwl_notif_statistics);
616 general = &priv->_agn.statistics.general.common;
617 accum_general = &priv->_agn.accum_statistics.general.common;
618 tx = &priv->_agn.statistics.tx;
619 accum_tx = &priv->_agn.accum_statistics.tx;
620 delta = (u32 *)&priv->_agn.delta_statistics;
621 max_delta = (u32 *)&priv->_agn.max_delta;
622 }
623 for (i = sizeof(__le32); i < size;
624 i += sizeof(__le32), stats++, prev_stats++, delta++,
625 max_delta++, accum_stats++) {
626 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
627 *delta = (le32_to_cpu(*stats) -
628 le32_to_cpu(*prev_stats));
629 *accum_stats += *delta;
630 if (*delta > *max_delta)
631 *max_delta = *delta;
252 } 632 }
253 if (priv->cfg->ops->lib->check_plcp_health) { 633 }
254 if (!priv->cfg->ops->lib->check_plcp_health( 634
255 priv, pkt)) { 635 /* reset accumulative statistics for "no-counter" type statistics */
256 /* 636 accum_general->temperature = general->temperature;
257 * high plcp error detected 637 accum_general->temperature_m = general->temperature_m;
258 * reset Radio 638 accum_general->ttl_timestamp = general->ttl_timestamp;
259 */ 639 accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
260 iwl_force_reset(priv, IWL_RF_RESET, false); 640 accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
261 } 641 accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
642#endif
643}
644
645static void iwl_rx_statistics(struct iwl_priv *priv,
646 struct iwl_rx_mem_buffer *rxb)
647{
648 const int reg_recalib_period = 60;
649 int change;
650 struct iwl_rx_packet *pkt = rxb_addr(rxb);
651
652 if (iwl_bt_statistics(priv)) {
653 IWL_DEBUG_RX(priv,
654 "Statistics notification received (%d vs %d).\n",
655 (int)sizeof(struct iwl_bt_notif_statistics),
656 le32_to_cpu(pkt->len_n_flags) &
657 FH_RSCSR_FRAME_SIZE_MSK);
658
659 change = ((priv->_agn.statistics_bt.general.common.temperature !=
660 pkt->u.stats_bt.general.common.temperature) ||
661 ((priv->_agn.statistics_bt.flag &
662 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
663 (pkt->u.stats_bt.flag &
664 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
665
666 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
667 } else {
668 IWL_DEBUG_RX(priv,
669 "Statistics notification received (%d vs %d).\n",
670 (int)sizeof(struct iwl_notif_statistics),
671 le32_to_cpu(pkt->len_n_flags) &
672 FH_RSCSR_FRAME_SIZE_MSK);
673
674 change = ((priv->_agn.statistics.general.common.temperature !=
675 pkt->u.stats.general.common.temperature) ||
676 ((priv->_agn.statistics.flag &
677 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
678 (pkt->u.stats.flag &
679 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
680
681 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
682 }
683
684 iwl_recover_from_statistics(priv, pkt);
685
686 set_bit(STATUS_STATISTICS, &priv->status);
687
688 /* Reschedule the statistics timer to occur in
689 * reg_recalib_period seconds to ensure we get a
690 * thermal update even if the uCode doesn't give
691 * us one */
692 mod_timer(&priv->statistics_periodic, jiffies +
693 msecs_to_jiffies(reg_recalib_period * 1000));
694
695 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
696 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
697 iwl_rx_calc_noise(priv);
698 queue_work(priv->workqueue, &priv->run_time_calib_work);
699 }
700 if (priv->cfg->ops->lib->temp_ops.temperature && change)
701 priv->cfg->ops->lib->temp_ops.temperature(priv);
702}
703
704static void iwl_rx_reply_statistics(struct iwl_priv *priv,
705 struct iwl_rx_mem_buffer *rxb)
706{
707 struct iwl_rx_packet *pkt = rxb_addr(rxb);
708
709 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
710#ifdef CONFIG_IWLWIFI_DEBUGFS
711 memset(&priv->_agn.accum_statistics, 0,
712 sizeof(struct iwl_notif_statistics));
713 memset(&priv->_agn.delta_statistics, 0,
714 sizeof(struct iwl_notif_statistics));
715 memset(&priv->_agn.max_delta, 0,
716 sizeof(struct iwl_notif_statistics));
717 memset(&priv->_agn.accum_statistics_bt, 0,
718 sizeof(struct iwl_bt_notif_statistics));
719 memset(&priv->_agn.delta_statistics_bt, 0,
720 sizeof(struct iwl_bt_notif_statistics));
721 memset(&priv->_agn.max_delta_bt, 0,
722 sizeof(struct iwl_bt_notif_statistics));
723#endif
724 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
725 }
726 iwl_rx_statistics(priv, rxb);
727}
728
729/* Handle notification from uCode that card's power state is changing
730 * due to software, hardware, or critical temperature RFKILL */
731static void iwl_rx_card_state_notif(struct iwl_priv *priv,
732 struct iwl_rx_mem_buffer *rxb)
733{
734 struct iwl_rx_packet *pkt = rxb_addr(rxb);
735 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
736 unsigned long status = priv->status;
737
738 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
739 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
740 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
741 (flags & CT_CARD_DISABLED) ?
742 "Reached" : "Not reached");
743
744 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
745 CT_CARD_DISABLED)) {
746
747 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
748 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
749
750 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
751 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
752
753 if (!(flags & RXON_CARD_DISABLED)) {
754 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
755 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
756 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
757 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
262 } 758 }
759 if (flags & CT_CARD_DISABLED)
760 iwl_tt_enter_ct_kill(priv);
761 }
762 if (!(flags & CT_CARD_DISABLED))
763 iwl_tt_exit_ct_kill(priv);
764
765 if (flags & HW_CARD_DISABLED)
766 set_bit(STATUS_RF_KILL_HW, &priv->status);
767 else
768 clear_bit(STATUS_RF_KILL_HW, &priv->status);
769
770
771 if (!(flags & RXON_CARD_DISABLED))
772 iwl_scan_cancel(priv);
773
774 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
775 test_bit(STATUS_RF_KILL_HW, &priv->status)))
776 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
777 test_bit(STATUS_RF_KILL_HW, &priv->status));
778 else
779 wake_up_interruptible(&priv->wait_command_queue);
780}
781
782static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
783 struct iwl_rx_mem_buffer *rxb)
784
785{
786 struct iwl_rx_packet *pkt = rxb_addr(rxb);
787 struct iwl_missed_beacon_notif *missed_beacon;
788
789 missed_beacon = &pkt->u.missed_beacon;
790 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
791 priv->missed_beacon_threshold) {
792 IWL_DEBUG_CALIB(priv,
793 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
794 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
795 le32_to_cpu(missed_beacon->total_missed_becons),
796 le32_to_cpu(missed_beacon->num_recvd_beacons),
797 le32_to_cpu(missed_beacon->num_expected_beacons));
798 if (!test_bit(STATUS_SCANNING, &priv->status))
799 iwl_init_sensitivity(priv);
263 } 800 }
264} 801}
265EXPORT_SYMBOL(iwl_recover_from_statistics); 802
803/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
804 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
805static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
806 struct iwl_rx_mem_buffer *rxb)
807{
808 struct iwl_rx_packet *pkt = rxb_addr(rxb);
809
810 priv->_agn.last_phy_res_valid = true;
811 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
812 sizeof(struct iwl_rx_phy_res));
813}
266 814
267/* 815/*
268 * returns non-zero if packet should be dropped 816 * returns non-zero if packet should be dropped
269 */ 817 */
270int iwl_set_decrypted_flag(struct iwl_priv *priv, 818static int iwl_set_decrypted_flag(struct iwl_priv *priv,
271 struct ieee80211_hdr *hdr, 819 struct ieee80211_hdr *hdr,
272 u32 decrypt_res, 820 u32 decrypt_res,
273 struct ieee80211_rx_status *stats) 821 struct ieee80211_rx_status *stats)
274{ 822{
275 u16 fc = le16_to_cpu(hdr->frame_control); 823 u16 fc = le16_to_cpu(hdr->frame_control);
276 824
@@ -315,4 +863,264 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
315 } 863 }
316 return 0; 864 return 0;
317} 865}
318EXPORT_SYMBOL(iwl_set_decrypted_flag); 866
867static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
868 struct ieee80211_hdr *hdr,
869 u16 len,
870 u32 ampdu_status,
871 struct iwl_rx_mem_buffer *rxb,
872 struct ieee80211_rx_status *stats)
873{
874 struct sk_buff *skb;
875 __le16 fc = hdr->frame_control;
876
877 /* We only process data packets if the interface is open */
878 if (unlikely(!priv->is_open)) {
879 IWL_DEBUG_DROP_LIMIT(priv,
880 "Dropping packet while interface is not open.\n");
881 return;
882 }
883
884 /* In case of HW accelerated crypto and bad decryption, drop */
885 if (!priv->cfg->mod_params->sw_crypto &&
886 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
887 return;
888
889 skb = dev_alloc_skb(128);
890 if (!skb) {
891 IWL_ERR(priv, "dev_alloc_skb failed\n");
892 return;
893 }
894
895 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
896
897 iwl_update_stats(priv, false, fc, len);
898 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
899
900 ieee80211_rx(priv->hw, skb);
901 priv->alloc_rxb_page--;
902 rxb->page = NULL;
903}
904
905static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
906{
907 u32 decrypt_out = 0;
908
909 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
910 RX_RES_STATUS_STATION_FOUND)
911 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
912 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
913
914 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
915
916 /* packet was not encrypted */
917 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
918 RX_RES_STATUS_SEC_TYPE_NONE)
919 return decrypt_out;
920
921 /* packet was encrypted with unknown alg */
922 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
923 RX_RES_STATUS_SEC_TYPE_ERR)
924 return decrypt_out;
925
926 /* decryption was not done in HW */
927 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
928 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
929 return decrypt_out;
930
931 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
932
933 case RX_RES_STATUS_SEC_TYPE_CCMP:
934 /* alg is CCM: check MIC only */
935 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
936 /* Bad MIC */
937 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
938 else
939 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
940
941 break;
942
943 case RX_RES_STATUS_SEC_TYPE_TKIP:
944 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
945 /* Bad TTAK */
946 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
947 break;
948 }
949 /* fall through if TTAK OK */
950 default:
951 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
952 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
953 else
954 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
955 break;
956 }
957
958 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
959 decrypt_in, decrypt_out);
960
961 return decrypt_out;
962}
963
964/* Called for REPLY_RX (legacy ABG frames), or
965 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
966static void iwl_rx_reply_rx(struct iwl_priv *priv,
967 struct iwl_rx_mem_buffer *rxb)
968{
969 struct ieee80211_hdr *header;
970 struct ieee80211_rx_status rx_status;
971 struct iwl_rx_packet *pkt = rxb_addr(rxb);
972 struct iwl_rx_phy_res *phy_res;
973 __le32 rx_pkt_status;
974 struct iwl_rx_mpdu_res_start *amsdu;
975 u32 len;
976 u32 ampdu_status;
977 u32 rate_n_flags;
978
979 /**
980 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
981 * REPLY_RX: physical layer info is in this buffer
982 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
983 * command and cached in priv->last_phy_res
984 *
985 * Here we set up local variables depending on which command is
986 * received.
987 */
988 if (pkt->hdr.cmd == REPLY_RX) {
989 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
990 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
991 + phy_res->cfg_phy_cnt);
992
993 len = le16_to_cpu(phy_res->byte_count);
994 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
995 phy_res->cfg_phy_cnt + len);
996 ampdu_status = le32_to_cpu(rx_pkt_status);
997 } else {
998 if (!priv->_agn.last_phy_res_valid) {
999 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1000 return;
1001 }
1002 phy_res = &priv->_agn.last_phy_res;
1003 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
1004 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1005 len = le16_to_cpu(amsdu->byte_count);
1006 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1007 ampdu_status = iwl_translate_rx_status(priv,
1008 le32_to_cpu(rx_pkt_status));
1009 }
1010
1011 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
1012 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
1013 phy_res->cfg_phy_cnt);
1014 return;
1015 }
1016
1017 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1018 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1019 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1020 le32_to_cpu(rx_pkt_status));
1021 return;
1022 }
1023
1024 /* This will be used in several places later */
1025 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1026
1027 /* rx_status carries information about the packet to mac80211 */
1028 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1029 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1030 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1031 rx_status.freq =
1032 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
1033 rx_status.band);
1034 rx_status.rate_idx =
1035 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1036 rx_status.flag = 0;
1037
1038 /* TSF isn't reliable. In order to allow smooth user experience,
1039 * this W/A doesn't propagate it to the mac80211 */
1040 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
1041
1042 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1043
1044 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1045 rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
1046
1047 iwl_dbg_log_rx_data_frame(priv, len, header);
1048 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1049 rx_status.signal, (unsigned long long)rx_status.mactime);
1050
1051 /*
1052 * "antenna number"
1053 *
1054 * It seems that the antenna field in the phy flags value
1055 * is actually a bit field. This is undefined by radiotap,
1056 * it wants an actual antenna number but I always get "7"
1057 * for most legacy frames I receive indicating that the
1058 * same frame was received on all three RX chains.
1059 *
1060 * I think this field should be removed in favor of a
1061 * new 802.11n radiotap field "RX chains" that is defined
1062 * as a bitmask.
1063 */
1064 rx_status.antenna =
1065 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1066 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1067
1068 /* set the preamble flag if appropriate */
1069 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1070 rx_status.flag |= RX_FLAG_SHORTPRE;
1071
1072 /* Set up the HT phy flags */
1073 if (rate_n_flags & RATE_MCS_HT_MSK)
1074 rx_status.flag |= RX_FLAG_HT;
1075 if (rate_n_flags & RATE_MCS_HT40_MSK)
1076 rx_status.flag |= RX_FLAG_40MHZ;
1077 if (rate_n_flags & RATE_MCS_SGI_MSK)
1078 rx_status.flag |= RX_FLAG_SHORT_GI;
1079
1080 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1081 rxb, &rx_status);
1082}
1083
1084/**
1085 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
1086 *
1087 * Setup the RX handlers for each of the reply types sent from the uCode
1088 * to the host.
1089 */
1090void iwl_setup_rx_handlers(struct iwl_priv *priv)
1091{
1092 void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
1093
1094 handlers = priv->rx_handlers;
1095
1096 handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
1097 handlers[REPLY_ERROR] = iwl_rx_reply_error;
1098 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
1099 handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
1100 handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
1101 handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
1102 handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
1103
1104 /*
1105 * The same handler is used for both the REPLY to a discrete
1106 * statistics request from the host as well as for the periodic
1107 * statistics notifications (after received beacons) from the uCode.
1108 */
1109 handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
1110 handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
1111
1112 iwl_setup_rx_scan_handlers(priv);
1113
1114 handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
1115 handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
1116
1117 /* Rx handlers */
1118 handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
1119 handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
1120
1121 /* block ack */
1122 handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
1123
1124 /* Set up hardware specific Rx handlers */
1125 priv->cfg->ops->lib->rx_handler_setup(priv);
1126}
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 12d9363d0afe..3a4d9e6b0421 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -101,7 +101,7 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
101 ieee80211_scan_completed(priv->hw, aborted); 101 ieee80211_scan_completed(priv->hw, aborted);
102 } 102 }
103 103
104 priv->is_internal_short_scan = false; 104 priv->scan_type = IWL_SCAN_NORMAL;
105 priv->scan_vif = NULL; 105 priv->scan_vif = NULL;
106 priv->scan_request = NULL; 106 priv->scan_request = NULL;
107} 107}
@@ -155,7 +155,6 @@ int iwl_scan_cancel(struct iwl_priv *priv)
155 queue_work(priv->workqueue, &priv->abort_scan); 155 queue_work(priv->workqueue, &priv->abort_scan);
156 return 0; 156 return 0;
157} 157}
158EXPORT_SYMBOL(iwl_scan_cancel);
159 158
160/** 159/**
161 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan 160 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
@@ -180,7 +179,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
180 179
181 return test_bit(STATUS_SCAN_HW, &priv->status); 180 return test_bit(STATUS_SCAN_HW, &priv->status);
182} 181}
183EXPORT_SYMBOL(iwl_scan_cancel_timeout);
184 182
185/* Service response to REPLY_SCAN_CMD (0x80) */ 183/* Service response to REPLY_SCAN_CMD (0x80) */
186static void iwl_rx_reply_scan(struct iwl_priv *priv, 184static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -257,8 +255,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
257 queue_work(priv->workqueue, &priv->scan_completed); 255 queue_work(priv->workqueue, &priv->scan_completed);
258 256
259 if (priv->iw_mode != NL80211_IFTYPE_ADHOC && 257 if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
260 priv->cfg->bt_params && 258 iwl_advanced_bt_coexist(priv) &&
261 priv->cfg->bt_params->advanced_bt_coexist &&
262 priv->bt_status != scan_notif->bt_status) { 259 priv->bt_status != scan_notif->bt_status) {
263 if (scan_notif->bt_status) { 260 if (scan_notif->bt_status) {
264 /* BT on */ 261 /* BT on */
@@ -289,7 +286,6 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
289 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = 286 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
290 iwl_rx_scan_complete_notif; 287 iwl_rx_scan_complete_notif;
291} 288}
292EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
293 289
294inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, 290inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
295 enum ieee80211_band band, 291 enum ieee80211_band band,
@@ -302,7 +298,6 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
302 return IWL_ACTIVE_DWELL_TIME_24 + 298 return IWL_ACTIVE_DWELL_TIME_24 +
303 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); 299 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
304} 300}
305EXPORT_SYMBOL(iwl_get_active_dwell_time);
306 301
307u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, 302u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
308 enum ieee80211_band band, 303 enum ieee80211_band band,
@@ -334,7 +329,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
334 329
335 return passive; 330 return passive;
336} 331}
337EXPORT_SYMBOL(iwl_get_passive_dwell_time);
338 332
339void iwl_init_scan_params(struct iwl_priv *priv) 333void iwl_init_scan_params(struct iwl_priv *priv)
340{ 334{
@@ -344,12 +338,11 @@ void iwl_init_scan_params(struct iwl_priv *priv)
344 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 338 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
345 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; 339 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
346} 340}
347EXPORT_SYMBOL(iwl_init_scan_params);
348 341
349static int __must_check iwl_scan_initiate(struct iwl_priv *priv, 342int __must_check iwl_scan_initiate(struct iwl_priv *priv,
350 struct ieee80211_vif *vif, 343 struct ieee80211_vif *vif,
351 bool internal, 344 enum iwl_scan_type scan_type,
352 enum ieee80211_band band) 345 enum ieee80211_band band)
353{ 346{
354 int ret; 347 int ret;
355 348
@@ -377,17 +370,19 @@ static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
377 } 370 }
378 371
379 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n", 372 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
380 internal ? "internal short " : ""); 373 scan_type == IWL_SCAN_NORMAL ? "" :
374 scan_type == IWL_SCAN_OFFCH_TX ? "offchan TX " :
375 "internal short ");
381 376
382 set_bit(STATUS_SCANNING, &priv->status); 377 set_bit(STATUS_SCANNING, &priv->status);
383 priv->is_internal_short_scan = internal; 378 priv->scan_type = scan_type;
384 priv->scan_start = jiffies; 379 priv->scan_start = jiffies;
385 priv->scan_band = band; 380 priv->scan_band = band;
386 381
387 ret = priv->cfg->ops->utils->request_scan(priv, vif); 382 ret = priv->cfg->ops->utils->request_scan(priv, vif);
388 if (ret) { 383 if (ret) {
389 clear_bit(STATUS_SCANNING, &priv->status); 384 clear_bit(STATUS_SCANNING, &priv->status);
390 priv->is_internal_short_scan = false; 385 priv->scan_type = IWL_SCAN_NORMAL;
391 return ret; 386 return ret;
392 } 387 }
393 388
@@ -412,7 +407,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
412 mutex_lock(&priv->mutex); 407 mutex_lock(&priv->mutex);
413 408
414 if (test_bit(STATUS_SCANNING, &priv->status) && 409 if (test_bit(STATUS_SCANNING, &priv->status) &&
415 !priv->is_internal_short_scan) { 410 priv->scan_type != IWL_SCAN_NORMAL) {
416 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 411 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
417 ret = -EAGAIN; 412 ret = -EAGAIN;
418 goto out_unlock; 413 goto out_unlock;
@@ -426,11 +421,11 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
426 * If an internal scan is in progress, just set 421 * If an internal scan is in progress, just set
427 * up the scan_request as per above. 422 * up the scan_request as per above.
428 */ 423 */
429 if (priv->is_internal_short_scan) { 424 if (priv->scan_type != IWL_SCAN_NORMAL) {
430 IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); 425 IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
431 ret = 0; 426 ret = 0;
432 } else 427 } else
433 ret = iwl_scan_initiate(priv, vif, false, 428 ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
434 req->channels[0]->band); 429 req->channels[0]->band);
435 430
436 IWL_DEBUG_MAC80211(priv, "leave\n"); 431 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -440,7 +435,6 @@ out_unlock:
440 435
441 return ret; 436 return ret;
442} 437}
443EXPORT_SYMBOL(iwl_mac_hw_scan);
444 438
445/* 439/*
446 * internal short scan, this function should only been called while associated. 440 * internal short scan, this function should only been called while associated.
@@ -460,7 +454,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
460 454
461 mutex_lock(&priv->mutex); 455 mutex_lock(&priv->mutex);
462 456
463 if (priv->is_internal_short_scan == true) { 457 if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
464 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); 458 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
465 goto unlock; 459 goto unlock;
466 } 460 }
@@ -470,7 +464,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
470 goto unlock; 464 goto unlock;
471 } 465 }
472 466
473 if (iwl_scan_initiate(priv, NULL, true, priv->band)) 467 if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
474 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n"); 468 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
475 unlock: 469 unlock:
476 mutex_unlock(&priv->mutex); 470 mutex_unlock(&priv->mutex);
@@ -537,7 +531,6 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
537 531
538 return (u16)len; 532 return (u16)len;
539} 533}
540EXPORT_SYMBOL(iwl_fill_probe_req);
541 534
542static void iwl_bg_abort_scan(struct work_struct *work) 535static void iwl_bg_abort_scan(struct work_struct *work)
543{ 536{
@@ -558,8 +551,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
558 container_of(work, struct iwl_priv, scan_completed); 551 container_of(work, struct iwl_priv, scan_completed);
559 bool aborted; 552 bool aborted;
560 553
561 IWL_DEBUG_SCAN(priv, "Completed %sscan.\n", 554 IWL_DEBUG_SCAN(priv, "Completed scan.\n");
562 priv->is_internal_short_scan ? "internal short " : "");
563 555
564 cancel_delayed_work(&priv->scan_check); 556 cancel_delayed_work(&priv->scan_check);
565 557
@@ -574,7 +566,13 @@ static void iwl_bg_scan_completed(struct work_struct *work)
574 goto out_settings; 566 goto out_settings;
575 } 567 }
576 568
577 if (priv->is_internal_short_scan && !aborted) { 569 if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->_agn.offchan_tx_skb) {
570 ieee80211_tx_status_irqsafe(priv->hw,
571 priv->_agn.offchan_tx_skb);
572 priv->_agn.offchan_tx_skb = NULL;
573 }
574
575 if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
578 int err; 576 int err;
579 577
580 /* Check if mac80211 requested scan during our internal scan */ 578 /* Check if mac80211 requested scan during our internal scan */
@@ -582,7 +580,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
582 goto out_complete; 580 goto out_complete;
583 581
584 /* If so request a new scan */ 582 /* If so request a new scan */
585 err = iwl_scan_initiate(priv, priv->scan_vif, false, 583 err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
586 priv->scan_request->channels[0]->band); 584 priv->scan_request->channels[0]->band);
587 if (err) { 585 if (err) {
588 IWL_DEBUG_SCAN(priv, 586 IWL_DEBUG_SCAN(priv,
@@ -622,7 +620,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
622 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); 620 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
623 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); 621 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
624} 622}
625EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
626 623
627void iwl_cancel_scan_deferred_work(struct iwl_priv *priv) 624void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
628{ 625{
@@ -636,4 +633,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
636 mutex_unlock(&priv->mutex); 633 mutex_unlock(&priv->mutex);
637 } 634 }
638} 635}
639EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 49493d176515..bc90a12408a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -169,7 +169,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
169 169
170 return ret; 170 return ret;
171} 171}
172EXPORT_SYMBOL(iwl_send_add_sta);
173 172
174static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, 173static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
175 struct ieee80211_sta *sta, 174 struct ieee80211_sta *sta,
@@ -316,7 +315,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
316 return sta_id; 315 return sta_id;
317 316
318} 317}
319EXPORT_SYMBOL_GPL(iwl_prep_station);
320 318
321#define STA_WAIT_TIMEOUT (HZ/2) 319#define STA_WAIT_TIMEOUT (HZ/2)
322 320
@@ -379,7 +377,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
379 *sta_id_r = sta_id; 377 *sta_id_r = sta_id;
380 return ret; 378 return ret;
381} 379}
382EXPORT_SYMBOL(iwl_add_station_common);
383 380
384/** 381/**
385 * iwl_sta_ucode_deactivate - deactivate ucode status for a station 382 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
@@ -513,7 +510,6 @@ out_err:
513 spin_unlock_irqrestore(&priv->sta_lock, flags); 510 spin_unlock_irqrestore(&priv->sta_lock, flags);
514 return -EINVAL; 511 return -EINVAL;
515} 512}
516EXPORT_SYMBOL_GPL(iwl_remove_station);
517 513
518/** 514/**
519 * iwl_clear_ucode_stations - clear ucode station table bits 515 * iwl_clear_ucode_stations - clear ucode station table bits
@@ -548,7 +544,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
548 if (!cleared) 544 if (!cleared)
549 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n"); 545 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
550} 546}
551EXPORT_SYMBOL(iwl_clear_ucode_stations);
552 547
553/** 548/**
554 * iwl_restore_stations() - Restore driver known stations to device 549 * iwl_restore_stations() - Restore driver known stations to device
@@ -625,7 +620,6 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
625 else 620 else
626 IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n"); 621 IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
627} 622}
628EXPORT_SYMBOL(iwl_restore_stations);
629 623
630void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 624void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
631{ 625{
@@ -668,7 +662,6 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
668 priv->stations[sta_id].sta.sta.addr, ret); 662 priv->stations[sta_id].sta.sta.addr, ret);
669 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); 663 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
670} 664}
671EXPORT_SYMBOL(iwl_reprogram_ap_sta);
672 665
673int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 666int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
674{ 667{
@@ -680,7 +673,6 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
680 673
681 return WEP_INVALID_OFFSET; 674 return WEP_INVALID_OFFSET;
682} 675}
683EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
684 676
685void iwl_dealloc_bcast_stations(struct iwl_priv *priv) 677void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
686{ 678{
@@ -700,7 +692,6 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
700 } 692 }
701 spin_unlock_irqrestore(&priv->sta_lock, flags); 693 spin_unlock_irqrestore(&priv->sta_lock, flags);
702} 694}
703EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
704 695
705#ifdef CONFIG_IWLWIFI_DEBUG 696#ifdef CONFIG_IWLWIFI_DEBUG
706static void iwl_dump_lq_cmd(struct iwl_priv *priv, 697static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -810,7 +801,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
810 } 801 }
811 return ret; 802 return ret;
812} 803}
813EXPORT_SYMBOL(iwl_send_lq_cmd);
814 804
815int iwl_mac_sta_remove(struct ieee80211_hw *hw, 805int iwl_mac_sta_remove(struct ieee80211_hw *hw,
816 struct ieee80211_vif *vif, 806 struct ieee80211_vif *vif,
@@ -832,4 +822,3 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
832 mutex_unlock(&priv->mutex); 822 mutex_unlock(&priv->mutex);
833 return ret; 823 return ret;
834} 824}
835EXPORT_SYMBOL(iwl_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 073b6ce6141c..277c9175dcf6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -84,7 +84,23 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
84 } 84 }
85 txq->need_update = 0; 85 txq->need_update = 0;
86} 86}
87EXPORT_SYMBOL(iwl_txq_update_write_ptr); 87
88/**
89 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
90 */
91void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
92{
93 struct iwl_tx_queue *txq = &priv->txq[txq_id];
94 struct iwl_queue *q = &txq->q;
95
96 if (q->n_bd == 0)
97 return;
98
99 while (q->write_ptr != q->read_ptr) {
100 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
101 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
102 }
103}
88 104
89/** 105/**
90 * iwl_tx_queue_free - Deallocate DMA queue. 106 * iwl_tx_queue_free - Deallocate DMA queue.
@@ -97,17 +113,10 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
97void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) 113void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
98{ 114{
99 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 115 struct iwl_tx_queue *txq = &priv->txq[txq_id];
100 struct iwl_queue *q = &txq->q;
101 struct device *dev = &priv->pci_dev->dev; 116 struct device *dev = &priv->pci_dev->dev;
102 int i; 117 int i;
103 118
104 if (q->n_bd == 0) 119 iwl_tx_queue_unmap(priv, txq_id);
105 return;
106
107 /* first, empty all BD's */
108 for (; q->write_ptr != q->read_ptr;
109 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
110 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
111 120
112 /* De-alloc array of command/tx buffers */ 121 /* De-alloc array of command/tx buffers */
113 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 122 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -131,42 +140,35 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
131 /* 0-fill queue descriptor structure */ 140 /* 0-fill queue descriptor structure */
132 memset(txq, 0, sizeof(*txq)); 141 memset(txq, 0, sizeof(*txq));
133} 142}
134EXPORT_SYMBOL(iwl_tx_queue_free);
135 143
136/** 144/**
137 * iwl_cmd_queue_free - Deallocate DMA queue. 145 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
138 * @txq: Transmit queue to deallocate.
139 *
140 * Empty queue by removing and destroying all BD's.
141 * Free all buffers.
142 * 0-fill, but do not free "txq" descriptor structure.
143 */ 146 */
144void iwl_cmd_queue_free(struct iwl_priv *priv) 147void iwl_cmd_queue_unmap(struct iwl_priv *priv)
145{ 148{
146 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 149 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
147 struct iwl_queue *q = &txq->q; 150 struct iwl_queue *q = &txq->q;
148 struct device *dev = &priv->pci_dev->dev;
149 int i; 151 int i;
150 bool huge = false; 152 bool huge = false;
151 153
152 if (q->n_bd == 0) 154 if (q->n_bd == 0)
153 return; 155 return;
154 156
155 for (; q->read_ptr != q->write_ptr; 157 while (q->read_ptr != q->write_ptr) {
156 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
157 /* we have no way to tell if it is a huge cmd ATM */ 158 /* we have no way to tell if it is a huge cmd ATM */
158 i = get_cmd_index(q, q->read_ptr, 0); 159 i = get_cmd_index(q, q->read_ptr, 0);
159 160
160 if (txq->meta[i].flags & CMD_SIZE_HUGE) { 161 if (txq->meta[i].flags & CMD_SIZE_HUGE)
161 huge = true; 162 huge = true;
162 continue; 163 else
163 } 164 pci_unmap_single(priv->pci_dev,
165 dma_unmap_addr(&txq->meta[i], mapping),
166 dma_unmap_len(&txq->meta[i], len),
167 PCI_DMA_BIDIRECTIONAL);
164 168
165 pci_unmap_single(priv->pci_dev, 169 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
166 dma_unmap_addr(&txq->meta[i], mapping),
167 dma_unmap_len(&txq->meta[i], len),
168 PCI_DMA_BIDIRECTIONAL);
169 } 170 }
171
170 if (huge) { 172 if (huge) {
171 i = q->n_window; 173 i = q->n_window;
172 pci_unmap_single(priv->pci_dev, 174 pci_unmap_single(priv->pci_dev,
@@ -174,6 +176,23 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
174 dma_unmap_len(&txq->meta[i], len), 176 dma_unmap_len(&txq->meta[i], len),
175 PCI_DMA_BIDIRECTIONAL); 177 PCI_DMA_BIDIRECTIONAL);
176 } 178 }
179}
180
181/**
182 * iwl_cmd_queue_free - Deallocate DMA queue.
183 * @txq: Transmit queue to deallocate.
184 *
185 * Empty queue by removing and destroying all BD's.
186 * Free all buffers.
187 * 0-fill, but do not free "txq" descriptor structure.
188 */
189void iwl_cmd_queue_free(struct iwl_priv *priv)
190{
191 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
192 struct device *dev = &priv->pci_dev->dev;
193 int i;
194
195 iwl_cmd_queue_unmap(priv);
177 196
178 /* De-alloc array of command/tx buffers */ 197 /* De-alloc array of command/tx buffers */
179 for (i = 0; i <= TFD_CMD_SLOTS; i++) 198 for (i = 0; i <= TFD_CMD_SLOTS; i++)
@@ -193,7 +212,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
193 /* 0-fill queue descriptor structure */ 212 /* 0-fill queue descriptor structure */
194 memset(txq, 0, sizeof(*txq)); 213 memset(txq, 0, sizeof(*txq));
195} 214}
196EXPORT_SYMBOL(iwl_cmd_queue_free);
197 215
198/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 216/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
199 * DMA services 217 * DMA services
@@ -233,7 +251,6 @@ int iwl_queue_space(const struct iwl_queue *q)
233 s = 0; 251 s = 0;
234 return s; 252 return s;
235} 253}
236EXPORT_SYMBOL(iwl_queue_space);
237 254
238 255
239/** 256/**
@@ -384,7 +401,6 @@ out_free_arrays:
384 401
385 return -ENOMEM; 402 return -ENOMEM;
386} 403}
387EXPORT_SYMBOL(iwl_tx_queue_init);
388 404
389void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 405void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
390 int slots_num, u32 txq_id) 406 int slots_num, u32 txq_id)
@@ -404,7 +420,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
404 /* Tell device where to find queue */ 420 /* Tell device where to find queue */
405 priv->cfg->ops->lib->txq_init(priv, txq); 421 priv->cfg->ops->lib->txq_init(priv, txq);
406} 422}
407EXPORT_SYMBOL(iwl_tx_queue_reset);
408 423
409/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 424/*************** HOST COMMAND QUEUE FUNCTIONS *****/
410 425
@@ -641,4 +656,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
641 } 656 }
642 meta->flags = 0; 657 meta->flags = 0;
643} 658}
644EXPORT_SYMBOL(iwl_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 5a4982271e96..ed57e4402800 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -287,7 +287,8 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
287 return -EINVAL; 287 return -EINVAL;
288 } 288 }
289 289
290 freq = ieee80211_channel_to_frequency(umac_bss->channel); 290 freq = ieee80211_channel_to_frequency(umac_bss->channel,
291 band->band);
291 channel = ieee80211_get_channel(wiphy, freq); 292 channel = ieee80211_get_channel(wiphy, freq);
292 signal = umac_bss->rssi * 100; 293 signal = umac_bss->rssi * 100;
293 294
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a944893ae3ca..9a57cf6a488f 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -543,7 +543,10 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
543 switch (le32_to_cpu(complete->status)) { 543 switch (le32_to_cpu(complete->status)) {
544 case UMAC_ASSOC_COMPLETE_SUCCESS: 544 case UMAC_ASSOC_COMPLETE_SUCCESS:
545 chan = ieee80211_get_channel(wiphy, 545 chan = ieee80211_get_channel(wiphy,
546 ieee80211_channel_to_frequency(complete->channel)); 546 ieee80211_channel_to_frequency(complete->channel,
547 complete->band == UMAC_BAND_2GHZ ?
548 IEEE80211_BAND_2GHZ :
549 IEEE80211_BAND_5GHZ));
547 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) { 550 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
548 /* Associated to a unallowed channel, disassociate. */ 551 /* Associated to a unallowed channel, disassociate. */
549 __iwm_invalidate_mlme_profile(iwm); 552 __iwm_invalidate_mlme_profile(iwm);
@@ -841,7 +844,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
841 goto err; 844 goto err;
842 } 845 }
843 846
844 freq = ieee80211_channel_to_frequency(umac_bss->channel); 847 freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
845 channel = ieee80211_get_channel(wiphy, freq); 848 channel = ieee80211_get_channel(wiphy, freq);
846 signal = umac_bss->rssi * 100; 849 signal = umac_bss->rssi * 100;
847 850
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 698a1f7694ed..30ef0351bfc4 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -607,7 +607,8 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
607 /* No channel, no luck */ 607 /* No channel, no luck */
608 if (chan_no != -1) { 608 if (chan_no != -1) {
609 struct wiphy *wiphy = priv->wdev->wiphy; 609 struct wiphy *wiphy = priv->wdev->wiphy;
610 int freq = ieee80211_channel_to_frequency(chan_no); 610 int freq = ieee80211_channel_to_frequency(chan_no,
611 IEEE80211_BAND_2GHZ);
611 struct ieee80211_channel *channel = 612 struct ieee80211_channel *channel =
612 ieee80211_get_channel(wiphy, freq); 613 ieee80211_get_channel(wiphy, freq);
613 614
@@ -1597,7 +1598,8 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
1597 lbs_deb_enter(LBS_DEB_CFG80211); 1598 lbs_deb_enter(LBS_DEB_CFG80211);
1598 1599
1599 survey->channel = ieee80211_get_channel(wiphy, 1600 survey->channel = ieee80211_get_channel(wiphy,
1600 ieee80211_channel_to_frequency(priv->channel)); 1601 ieee80211_channel_to_frequency(priv->channel,
1602 IEEE80211_BAND_2GHZ));
1601 1603
1602 ret = lbs_get_rssi(priv, &signal, &noise); 1604 ret = lbs_get_rssi(priv, &signal, &noise);
1603 if (ret == 0) { 1605 if (ret == 0) {
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 78c4da150a74..7e8a658b7670 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -145,9 +145,13 @@ int lbs_update_hw_spec(struct lbs_private *priv)
145 if (priv->current_addr[0] == 0xff) 145 if (priv->current_addr[0] == 0xff)
146 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN); 146 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
147 147
148 memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN); 148 if (!priv->copied_hwaddr) {
149 if (priv->mesh_dev) 149 memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
150 memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN); 150 if (priv->mesh_dev)
151 memcpy(priv->mesh_dev->dev_addr,
152 priv->current_addr, ETH_ALEN);
153 priv->copied_hwaddr = 1;
154 }
151 155
152out: 156out:
153 lbs_deb_leave(LBS_DEB_CMD); 157 lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 18dd9a02c459..bc461eb39660 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -90,6 +90,7 @@ struct lbs_private {
90 void *card; 90 void *card;
91 u8 fw_ready; 91 u8 fw_ready;
92 u8 surpriseremoved; 92 u8 surpriseremoved;
93 u8 setup_fw_on_resume;
93 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); 94 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
94 void (*reset_card) (struct lbs_private *priv); 95 void (*reset_card) (struct lbs_private *priv);
95 int (*enter_deep_sleep) (struct lbs_private *priv); 96 int (*enter_deep_sleep) (struct lbs_private *priv);
@@ -101,6 +102,7 @@ struct lbs_private {
101 u32 fwcapinfo; 102 u32 fwcapinfo;
102 u16 regioncode; 103 u16 regioncode;
103 u8 current_addr[ETH_ALEN]; 104 u8 current_addr[ETH_ALEN];
105 u8 copied_hwaddr;
104 106
105 /* Command download */ 107 /* Command download */
106 u8 dnld_sent; 108 u8 dnld_sent;
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 5eac1351a021..6cb6935ee4a3 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -387,7 +387,7 @@ struct lbs_offset_value {
387struct mrvl_ie_domain_param_set { 387struct mrvl_ie_domain_param_set {
388 struct mrvl_ie_header header; 388 struct mrvl_ie_header header;
389 389
390 u8 country_code[3]; 390 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
391 struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS]; 391 struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS];
392} __packed; 392} __packed;
393 393
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 00600239a053..f6c2cd665f49 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -20,10 +20,8 @@
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/kthread.h>
24#include <linux/list.h> 23#include <linux/list.h>
25#include <linux/netdevice.h> 24#include <linux/netdevice.h>
26#include <linux/semaphore.h>
27#include <linux/slab.h> 25#include <linux/slab.h>
28#include <linux/spi/libertas_spi.h> 26#include <linux/spi/libertas_spi.h>
29#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
@@ -34,6 +32,12 @@
34#include "dev.h" 32#include "dev.h"
35#include "if_spi.h" 33#include "if_spi.h"
36 34
35struct if_spi_packet {
36 struct list_head list;
37 u16 blen;
38 u8 buffer[0] __attribute__((aligned(4)));
39};
40
37struct if_spi_card { 41struct if_spi_card {
38 struct spi_device *spi; 42 struct spi_device *spi;
39 struct lbs_private *priv; 43 struct lbs_private *priv;
@@ -51,18 +55,36 @@ struct if_spi_card {
51 unsigned long spu_reg_delay; 55 unsigned long spu_reg_delay;
52 56
53 /* Handles all SPI communication (except for FW load) */ 57 /* Handles all SPI communication (except for FW load) */
54 struct task_struct *spi_thread; 58 struct workqueue_struct *workqueue;
55 int run_thread; 59 struct work_struct packet_work;
56
57 /* Used to wake up the spi_thread */
58 struct semaphore spi_ready;
59 struct semaphore spi_thread_terminated;
60 60
61 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE]; 61 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
62
63 /* A buffer of incoming packets from libertas core.
64 * Since we can't sleep in hw_host_to_card, we have to buffer
65 * them. */
66 struct list_head cmd_packet_list;
67 struct list_head data_packet_list;
68
69 /* Protects cmd_packet_list and data_packet_list */
70 spinlock_t buffer_lock;
62}; 71};
63 72
64static void free_if_spi_card(struct if_spi_card *card) 73static void free_if_spi_card(struct if_spi_card *card)
65{ 74{
75 struct list_head *cursor, *next;
76 struct if_spi_packet *packet;
77
78 list_for_each_safe(cursor, next, &card->cmd_packet_list) {
79 packet = container_of(cursor, struct if_spi_packet, list);
80 list_del(&packet->list);
81 kfree(packet);
82 }
83 list_for_each_safe(cursor, next, &card->data_packet_list) {
84 packet = container_of(cursor, struct if_spi_packet, list);
85 list_del(&packet->list);
86 kfree(packet);
87 }
66 spi_set_drvdata(card->spi, NULL); 88 spi_set_drvdata(card->spi, NULL);
67 kfree(card); 89 kfree(card);
68} 90}
@@ -622,7 +644,7 @@ out:
622/* 644/*
623 * SPI Transfer Thread 645 * SPI Transfer Thread
624 * 646 *
625 * The SPI thread handles all SPI transfers, so there is no need for a lock. 647 * The SPI worker handles all SPI transfers, so there is no need for a lock.
626 */ 648 */
627 649
628/* Move a command from the card to the host */ 650/* Move a command from the card to the host */
@@ -742,6 +764,40 @@ out:
742 return err; 764 return err;
743} 765}
744 766
767/* Move data or a command from the host to the card. */
768static void if_spi_h2c(struct if_spi_card *card,
769 struct if_spi_packet *packet, int type)
770{
771 int err = 0;
772 u16 int_type, port_reg;
773
774 switch (type) {
775 case MVMS_DAT:
776 int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
777 port_reg = IF_SPI_DATA_RDWRPORT_REG;
778 break;
779 case MVMS_CMD:
780 int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
781 port_reg = IF_SPI_CMD_RDWRPORT_REG;
782 break;
783 default:
784 lbs_pr_err("can't transfer buffer of type %d\n", type);
785 err = -EINVAL;
786 goto out;
787 }
788
789 /* Write the data to the card */
790 err = spu_write(card, port_reg, packet->buffer, packet->blen);
791 if (err)
792 goto out;
793
794out:
795 kfree(packet);
796
797 if (err)
798 lbs_pr_err("%s: error %d\n", __func__, err);
799}
800
745/* Inform the host about a card event */ 801/* Inform the host about a card event */
746static void if_spi_e2h(struct if_spi_card *card) 802static void if_spi_e2h(struct if_spi_card *card)
747{ 803{
@@ -766,71 +822,88 @@ out:
766 lbs_pr_err("%s: error %d\n", __func__, err); 822 lbs_pr_err("%s: error %d\n", __func__, err);
767} 823}
768 824
769static int lbs_spi_thread(void *data) 825static void if_spi_host_to_card_worker(struct work_struct *work)
770{ 826{
771 int err; 827 int err;
772 struct if_spi_card *card = data; 828 struct if_spi_card *card;
773 u16 hiStatus; 829 u16 hiStatus;
830 unsigned long flags;
831 struct if_spi_packet *packet;
774 832
775 while (1) { 833 card = container_of(work, struct if_spi_card, packet_work);
776 /* Wait to be woken up by one of two things. First, our ISR
777 * could tell us that something happened on the WLAN.
778 * Secondly, libertas could call hw_host_to_card with more
779 * data, which we might be able to send.
780 */
781 do {
782 err = down_interruptible(&card->spi_ready);
783 if (!card->run_thread) {
784 up(&card->spi_thread_terminated);
785 do_exit(0);
786 }
787 } while (err == -EINTR);
788 834
789 /* Read the host interrupt status register to see what we 835 lbs_deb_enter(LBS_DEB_SPI);
790 * can do. */ 836
791 err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG, 837 /* Read the host interrupt status register to see what we
792 &hiStatus); 838 * can do. */
793 if (err) { 839 err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
794 lbs_pr_err("I/O error\n"); 840 &hiStatus);
841 if (err) {
842 lbs_pr_err("I/O error\n");
843 goto err;
844 }
845
846 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
847 err = if_spi_c2h_cmd(card);
848 if (err)
795 goto err; 849 goto err;
796 } 850 }
851 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
852 err = if_spi_c2h_data(card);
853 if (err)
854 goto err;
855 }
797 856
798 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) { 857 /* workaround: in PS mode, the card does not set the Command
799 err = if_spi_c2h_cmd(card); 858 * Download Ready bit, but it sets TX Download Ready. */
800 if (err) 859 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
801 goto err; 860 (card->priv->psstate != PS_STATE_FULL_POWER &&
802 } 861 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
803 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) { 862 /* This means two things. First of all,
804 err = if_spi_c2h_data(card); 863 * if there was a previous command sent, the card has
805 if (err) 864 * successfully received it.
806 goto err; 865 * Secondly, it is now ready to download another
866 * command.
867 */
868 lbs_host_to_card_done(card->priv);
869
870 /* Do we have any command packets from the host to
871 * send? */
872 packet = NULL;
873 spin_lock_irqsave(&card->buffer_lock, flags);
874 if (!list_empty(&card->cmd_packet_list)) {
875 packet = (struct if_spi_packet *)(card->
876 cmd_packet_list.next);
877 list_del(&packet->list);
807 } 878 }
879 spin_unlock_irqrestore(&card->buffer_lock, flags);
808 880
809 /* workaround: in PS mode, the card does not set the Command 881 if (packet)
810 * Download Ready bit, but it sets TX Download Ready. */ 882 if_spi_h2c(card, packet, MVMS_CMD);
811 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || 883 }
812 (card->priv->psstate != PS_STATE_FULL_POWER && 884 if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
813 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { 885 /* Do we have any data packets from the host to
814 lbs_host_to_card_done(card->priv); 886 * send? */
887 packet = NULL;
888 spin_lock_irqsave(&card->buffer_lock, flags);
889 if (!list_empty(&card->data_packet_list)) {
890 packet = (struct if_spi_packet *)(card->
891 data_packet_list.next);
892 list_del(&packet->list);
815 } 893 }
894 spin_unlock_irqrestore(&card->buffer_lock, flags);
816 895
817 if (hiStatus & IF_SPI_HIST_CARD_EVENT) 896 if (packet)
818 if_spi_e2h(card); 897 if_spi_h2c(card, packet, MVMS_DAT);
898 }
899 if (hiStatus & IF_SPI_HIST_CARD_EVENT)
900 if_spi_e2h(card);
819 901
820err: 902err:
821 if (err) 903 if (err)
822 lbs_pr_err("%s: got error %d\n", __func__, err); 904 lbs_pr_err("%s: got error %d\n", __func__, err);
823 }
824}
825 905
826/* Block until lbs_spi_thread thread has terminated */ 906 lbs_deb_leave(LBS_DEB_SPI);
827static void if_spi_terminate_spi_thread(struct if_spi_card *card)
828{
829 /* It would be nice to use kthread_stop here, but that function
830 * can't wake threads waiting for a semaphore. */
831 card->run_thread = 0;
832 up(&card->spi_ready);
833 down(&card->spi_thread_terminated);
834} 907}
835 908
836/* 909/*
@@ -842,18 +915,40 @@ static int if_spi_host_to_card(struct lbs_private *priv,
842 u8 type, u8 *buf, u16 nb) 915 u8 type, u8 *buf, u16 nb)
843{ 916{
844 int err = 0; 917 int err = 0;
918 unsigned long flags;
845 struct if_spi_card *card = priv->card; 919 struct if_spi_card *card = priv->card;
920 struct if_spi_packet *packet;
921 u16 blen;
846 922
847 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb); 923 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
848 924
849 nb = ALIGN(nb, 4); 925 if (nb == 0) {
926 lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
927 err = -EINVAL;
928 goto out;
929 }
930 blen = ALIGN(nb, 4);
931 packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
932 if (!packet) {
933 err = -ENOMEM;
934 goto out;
935 }
936 packet->blen = blen;
937 memcpy(packet->buffer, buf, nb);
938 memset(packet->buffer + nb, 0, blen - nb);
850 939
851 switch (type) { 940 switch (type) {
852 case MVMS_CMD: 941 case MVMS_CMD:
853 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb); 942 priv->dnld_sent = DNLD_CMD_SENT;
943 spin_lock_irqsave(&card->buffer_lock, flags);
944 list_add_tail(&packet->list, &card->cmd_packet_list);
945 spin_unlock_irqrestore(&card->buffer_lock, flags);
854 break; 946 break;
855 case MVMS_DAT: 947 case MVMS_DAT:
856 err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb); 948 priv->dnld_sent = DNLD_DATA_SENT;
949 spin_lock_irqsave(&card->buffer_lock, flags);
950 list_add_tail(&packet->list, &card->data_packet_list);
951 spin_unlock_irqrestore(&card->buffer_lock, flags);
857 break; 952 break;
858 default: 953 default:
859 lbs_pr_err("can't transfer buffer of type %d", type); 954 lbs_pr_err("can't transfer buffer of type %d", type);
@@ -861,6 +956,9 @@ static int if_spi_host_to_card(struct lbs_private *priv,
861 break; 956 break;
862 } 957 }
863 958
959 /* Queue spi xfer work */
960 queue_work(card->workqueue, &card->packet_work);
961out:
864 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err); 962 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
865 return err; 963 return err;
866} 964}
@@ -869,13 +967,14 @@ static int if_spi_host_to_card(struct lbs_private *priv,
869 * Host Interrupts 967 * Host Interrupts
870 * 968 *
871 * Service incoming interrupts from the WLAN device. We can't sleep here, so 969 * Service incoming interrupts from the WLAN device. We can't sleep here, so
872 * don't try to talk on the SPI bus, just wake up the SPI thread. 970 * don't try to talk on the SPI bus, just queue the SPI xfer work.
873 */ 971 */
874static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id) 972static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
875{ 973{
876 struct if_spi_card *card = dev_id; 974 struct if_spi_card *card = dev_id;
877 975
878 up(&card->spi_ready); 976 queue_work(card->workqueue, &card->packet_work);
977
879 return IRQ_HANDLED; 978 return IRQ_HANDLED;
880} 979}
881 980
@@ -883,56 +982,26 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
883 * SPI callbacks 982 * SPI callbacks
884 */ 983 */
885 984
886static int __devinit if_spi_probe(struct spi_device *spi) 985static int if_spi_init_card(struct if_spi_card *card)
887{ 986{
888 struct if_spi_card *card; 987 struct spi_device *spi = card->spi;
889 struct lbs_private *priv = NULL; 988 int err, i;
890 struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
891 int err = 0, i;
892 u32 scratch; 989 u32 scratch;
893 struct sched_param param = { .sched_priority = 1 };
894 const struct firmware *helper = NULL; 990 const struct firmware *helper = NULL;
895 const struct firmware *mainfw = NULL; 991 const struct firmware *mainfw = NULL;
896 992
897 lbs_deb_enter(LBS_DEB_SPI); 993 lbs_deb_enter(LBS_DEB_SPI);
898 994
899 if (!pdata) { 995 err = spu_init(card, card->pdata->use_dummy_writes);
900 err = -EINVAL;
901 goto out;
902 }
903
904 if (pdata->setup) {
905 err = pdata->setup(spi);
906 if (err)
907 goto out;
908 }
909
910 /* Allocate card structure to represent this specific device */
911 card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
912 if (!card) {
913 err = -ENOMEM;
914 goto out;
915 }
916 spi_set_drvdata(spi, card);
917 card->pdata = pdata;
918 card->spi = spi;
919 card->prev_xfer_time = jiffies;
920
921 sema_init(&card->spi_ready, 0);
922 sema_init(&card->spi_thread_terminated, 0);
923
924 /* Initialize the SPI Interface Unit */
925 err = spu_init(card, pdata->use_dummy_writes);
926 if (err) 996 if (err)
927 goto free_card; 997 goto out;
928 err = spu_get_chip_revision(card, &card->card_id, &card->card_rev); 998 err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
929 if (err) 999 if (err)
930 goto free_card; 1000 goto out;
931 1001
932 /* Firmware load */
933 err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch); 1002 err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
934 if (err) 1003 if (err)
935 goto free_card; 1004 goto out;
936 if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC) 1005 if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
937 lbs_deb_spi("Firmware is already loaded for " 1006 lbs_deb_spi("Firmware is already loaded for "
938 "Marvell WLAN 802.11 adapter\n"); 1007 "Marvell WLAN 802.11 adapter\n");
@@ -946,7 +1015,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
946 lbs_pr_err("Unsupported chip_id: 0x%02x\n", 1015 lbs_pr_err("Unsupported chip_id: 0x%02x\n",
947 card->card_id); 1016 card->card_id);
948 err = -ENODEV; 1017 err = -ENODEV;
949 goto free_card; 1018 goto out;
950 } 1019 }
951 1020
952 err = lbs_get_firmware(&card->spi->dev, NULL, NULL, 1021 err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
@@ -954,7 +1023,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
954 &mainfw); 1023 &mainfw);
955 if (err) { 1024 if (err) {
956 lbs_pr_err("failed to find firmware (%d)\n", err); 1025 lbs_pr_err("failed to find firmware (%d)\n", err);
957 goto free_card; 1026 goto out;
958 } 1027 }
959 1028
960 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter " 1029 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
@@ -966,15 +1035,68 @@ static int __devinit if_spi_probe(struct spi_device *spi)
966 spi->max_speed_hz); 1035 spi->max_speed_hz);
967 err = if_spi_prog_helper_firmware(card, helper); 1036 err = if_spi_prog_helper_firmware(card, helper);
968 if (err) 1037 if (err)
969 goto free_card; 1038 goto out;
970 err = if_spi_prog_main_firmware(card, mainfw); 1039 err = if_spi_prog_main_firmware(card, mainfw);
971 if (err) 1040 if (err)
972 goto free_card; 1041 goto out;
973 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n"); 1042 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
974 } 1043 }
975 1044
976 err = spu_set_interrupt_mode(card, 0, 1); 1045 err = spu_set_interrupt_mode(card, 0, 1);
977 if (err) 1046 if (err)
1047 goto out;
1048
1049out:
1050 if (helper)
1051 release_firmware(helper);
1052 if (mainfw)
1053 release_firmware(mainfw);
1054
1055 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1056
1057 return err;
1058}
1059
1060static int __devinit if_spi_probe(struct spi_device *spi)
1061{
1062 struct if_spi_card *card;
1063 struct lbs_private *priv = NULL;
1064 struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
1065 int err = 0;
1066
1067 lbs_deb_enter(LBS_DEB_SPI);
1068
1069 if (!pdata) {
1070 err = -EINVAL;
1071 goto out;
1072 }
1073
1074 if (pdata->setup) {
1075 err = pdata->setup(spi);
1076 if (err)
1077 goto out;
1078 }
1079
1080 /* Allocate card structure to represent this specific device */
1081 card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
1082 if (!card) {
1083 err = -ENOMEM;
1084 goto teardown;
1085 }
1086 spi_set_drvdata(spi, card);
1087 card->pdata = pdata;
1088 card->spi = spi;
1089 card->prev_xfer_time = jiffies;
1090
1091 INIT_LIST_HEAD(&card->cmd_packet_list);
1092 INIT_LIST_HEAD(&card->data_packet_list);
1093 spin_lock_init(&card->buffer_lock);
1094
1095 /* Initialize the SPI Interface Unit */
1096
1097 /* Firmware load */
1098 err = if_spi_init_card(card);
1099 if (err)
978 goto free_card; 1100 goto free_card;
979 1101
980 /* Register our card with libertas. 1102 /* Register our card with libertas.
@@ -993,27 +1115,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
993 priv->fw_ready = 1; 1115 priv->fw_ready = 1;
994 1116
995 /* Initialize interrupt handling stuff. */ 1117 /* Initialize interrupt handling stuff. */
996 card->run_thread = 1; 1118 card->workqueue = create_workqueue("libertas_spi");
997 card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread"); 1119 INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
998 if (IS_ERR(card->spi_thread)) {
999 card->run_thread = 0;
1000 err = PTR_ERR(card->spi_thread);
1001 lbs_pr_err("error creating SPI thread: err=%d\n", err);
1002 goto remove_card;
1003 }
1004 if (sched_setscheduler(card->spi_thread, SCHED_FIFO, &param))
1005 lbs_pr_err("Error setting scheduler, using default.\n");
1006 1120
1007 err = request_irq(spi->irq, if_spi_host_interrupt, 1121 err = request_irq(spi->irq, if_spi_host_interrupt,
1008 IRQF_TRIGGER_FALLING, "libertas_spi", card); 1122 IRQF_TRIGGER_FALLING, "libertas_spi", card);
1009 if (err) { 1123 if (err) {
1010 lbs_pr_err("can't get host irq line-- request_irq failed\n"); 1124 lbs_pr_err("can't get host irq line-- request_irq failed\n");
1011 goto terminate_thread; 1125 goto terminate_workqueue;
1012 } 1126 }
1013 1127
1014 /* poke the IRQ handler so that we don't miss the first interrupt */
1015 up(&card->spi_ready);
1016
1017 /* Start the card. 1128 /* Start the card.
1018 * This will call register_netdev, and we'll start 1129 * This will call register_netdev, and we'll start
1019 * getting interrupts... */ 1130 * getting interrupts... */
@@ -1028,18 +1139,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1028 1139
1029release_irq: 1140release_irq:
1030 free_irq(spi->irq, card); 1141 free_irq(spi->irq, card);
1031terminate_thread: 1142terminate_workqueue:
1032 if_spi_terminate_spi_thread(card); 1143 flush_workqueue(card->workqueue);
1033remove_card: 1144 destroy_workqueue(card->workqueue);
1034 lbs_remove_card(priv); /* will call free_netdev */ 1145 lbs_remove_card(priv); /* will call free_netdev */
1035free_card: 1146free_card:
1036 free_if_spi_card(card); 1147 free_if_spi_card(card);
1148teardown:
1149 if (pdata->teardown)
1150 pdata->teardown(spi);
1037out: 1151out:
1038 if (helper)
1039 release_firmware(helper);
1040 if (mainfw)
1041 release_firmware(mainfw);
1042
1043 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); 1152 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1044 return err; 1153 return err;
1045} 1154}
@@ -1056,7 +1165,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
1056 lbs_remove_card(priv); /* will call free_netdev */ 1165 lbs_remove_card(priv); /* will call free_netdev */
1057 1166
1058 free_irq(spi->irq, card); 1167 free_irq(spi->irq, card);
1059 if_spi_terminate_spi_thread(card); 1168 flush_workqueue(card->workqueue);
1169 destroy_workqueue(card->workqueue);
1060 if (card->pdata->teardown) 1170 if (card->pdata->teardown)
1061 card->pdata->teardown(spi); 1171 card->pdata->teardown(spi);
1062 free_if_spi_card(card); 1172 free_if_spi_card(card);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 6836a6dd9853..ca8149cd5bd9 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -539,6 +539,43 @@ static int lbs_thread(void *data)
539 return 0; 539 return 0;
540} 540}
541 541
542/**
543 * @brief This function gets the HW spec from the firmware and sets
544 * some basic parameters.
545 *
546 * @param priv A pointer to struct lbs_private structure
547 * @return 0 or -1
548 */
549static int lbs_setup_firmware(struct lbs_private *priv)
550{
551 int ret = -1;
552 s16 curlevel = 0, minlevel = 0, maxlevel = 0;
553
554 lbs_deb_enter(LBS_DEB_FW);
555
556 /* Read MAC address from firmware */
557 memset(priv->current_addr, 0xff, ETH_ALEN);
558 ret = lbs_update_hw_spec(priv);
559 if (ret)
560 goto done;
561
562 /* Read power levels if available */
563 ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
564 if (ret == 0) {
565 priv->txpower_cur = curlevel;
566 priv->txpower_min = minlevel;
567 priv->txpower_max = maxlevel;
568 }
569
570 /* Send cmd to FW to enable 11D function */
571 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
572
573 lbs_set_mac_control(priv);
574done:
575 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
576 return ret;
577}
578
542int lbs_suspend(struct lbs_private *priv) 579int lbs_suspend(struct lbs_private *priv)
543{ 580{
544 int ret; 581 int ret;
@@ -584,47 +621,13 @@ int lbs_resume(struct lbs_private *priv)
584 lbs_pr_err("deep sleep activation failed: %d\n", ret); 621 lbs_pr_err("deep sleep activation failed: %d\n", ret);
585 } 622 }
586 623
587 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 624 if (priv->setup_fw_on_resume)
588 return ret; 625 ret = lbs_setup_firmware(priv);
589}
590EXPORT_SYMBOL_GPL(lbs_resume);
591
592/**
593 * @brief This function gets the HW spec from the firmware and sets
594 * some basic parameters.
595 *
596 * @param priv A pointer to struct lbs_private structure
597 * @return 0 or -1
598 */
599static int lbs_setup_firmware(struct lbs_private *priv)
600{
601 int ret = -1;
602 s16 curlevel = 0, minlevel = 0, maxlevel = 0;
603
604 lbs_deb_enter(LBS_DEB_FW);
605
606 /* Read MAC address from firmware */
607 memset(priv->current_addr, 0xff, ETH_ALEN);
608 ret = lbs_update_hw_spec(priv);
609 if (ret)
610 goto done;
611
612 /* Read power levels if available */
613 ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
614 if (ret == 0) {
615 priv->txpower_cur = curlevel;
616 priv->txpower_min = minlevel;
617 priv->txpower_max = maxlevel;
618 }
619 626
620 /* Send cmd to FW to enable 11D function */
621 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
622
623 lbs_set_mac_control(priv);
624done:
625 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 627 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
626 return ret; 628 return ret;
627} 629}
630EXPORT_SYMBOL_GPL(lbs_resume);
628 631
629/** 632/**
630 * This function handles the timeout of command sending. 633 * This function handles the timeout of command sending.
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index acf3bf63ee33..9d097b9c8005 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -918,7 +918,6 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
918 char *buf) 918 char *buf)
919{ 919{
920 struct mrvl_mesh_defaults defs; 920 struct mrvl_mesh_defaults defs;
921 int maxlen;
922 int ret; 921 int ret;
923 922
924 ret = mesh_get_default_parameters(dev, &defs); 923 ret = mesh_get_default_parameters(dev, &defs);
@@ -931,13 +930,11 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
931 defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN; 930 defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
932 } 931 }
933 932
934 /* SSID not null terminated: reserve room for \0 + \n */ 933 memcpy(buf, defs.meshie.val.mesh_id, defs.meshie.val.mesh_id_len);
935 maxlen = defs.meshie.val.mesh_id_len + 2; 934 buf[defs.meshie.val.mesh_id_len] = '\n';
936 maxlen = (PAGE_SIZE > maxlen) ? maxlen : PAGE_SIZE; 935 buf[defs.meshie.val.mesh_id_len + 1] = '\0';
937 936
938 defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0'; 937 return defs.meshie.val.mesh_id_len + 1;
939
940 return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id);
941} 938}
942 939
943/** 940/**
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 9278b3c8ee30..d4005081f1df 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -225,7 +225,7 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
225 lbtf_deb_leave(LBTF_DEB_MAIN); 225 lbtf_deb_leave(LBTF_DEB_MAIN);
226} 226}
227 227
228static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 228static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
229{ 229{
230 struct lbtf_private *priv = hw->priv; 230 struct lbtf_private *priv = hw->priv;
231 231
@@ -236,7 +236,6 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
236 * there are no buffered multicast frames to send 236 * there are no buffered multicast frames to send
237 */ 237 */
238 ieee80211_stop_queues(priv->hw); 238 ieee80211_stop_queues(priv->hw);
239 return NETDEV_TX_OK;
240} 239}
241 240
242static void lbtf_tx_work(struct work_struct *work) 241static void lbtf_tx_work(struct work_struct *work)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 454f045ddff3..56f439d58013 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -541,7 +541,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
541} 541}
542 542
543 543
544static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 544static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
545{ 545{
546 bool ack; 546 bool ack;
547 struct ieee80211_tx_info *txi; 547 struct ieee80211_tx_info *txi;
@@ -551,7 +551,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
551 if (skb->len < 10) { 551 if (skb->len < 10) {
552 /* Should not happen; just a sanity check for addr1 use */ 552 /* Should not happen; just a sanity check for addr1 use */
553 dev_kfree_skb(skb); 553 dev_kfree_skb(skb);
554 return NETDEV_TX_OK; 554 return;
555 } 555 }
556 556
557 ack = mac80211_hwsim_tx_frame(hw, skb); 557 ack = mac80211_hwsim_tx_frame(hw, skb);
@@ -571,7 +571,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
571 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack) 571 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
572 txi->flags |= IEEE80211_TX_STAT_ACK; 572 txi->flags |= IEEE80211_TX_STAT_ACK;
573 ieee80211_tx_status_irqsafe(hw, skb); 573 ieee80211_tx_status_irqsafe(hw, skb);
574 return NETDEV_TX_OK;
575} 574}
576 575
577 576
@@ -943,7 +942,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
943static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, 942static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
944 struct ieee80211_vif *vif, 943 struct ieee80211_vif *vif,
945 enum ieee80211_ampdu_mlme_action action, 944 enum ieee80211_ampdu_mlme_action action,
946 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 945 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
946 u8 buf_size)
947{ 947{
948 switch (action) { 948 switch (action) {
949 case IEEE80211_AMPDU_TX_START: 949 case IEEE80211_AMPDU_TX_START:
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 9ecf8407cb1b..36952274950e 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -232,6 +232,9 @@ struct mwl8k_priv {
232 struct completion firmware_loading_complete; 232 struct completion firmware_loading_complete;
233}; 233};
234 234
235#define MAX_WEP_KEY_LEN 13
236#define NUM_WEP_KEYS 4
237
235/* Per interface specific private data */ 238/* Per interface specific private data */
236struct mwl8k_vif { 239struct mwl8k_vif {
237 struct list_head list; 240 struct list_head list;
@@ -242,8 +245,21 @@ struct mwl8k_vif {
242 245
243 /* Non AMPDU sequence number assigned by driver. */ 246 /* Non AMPDU sequence number assigned by driver. */
244 u16 seqno; 247 u16 seqno;
248
249 /* Saved WEP keys */
250 struct {
251 u8 enabled;
252 u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN];
253 } wep_key_conf[NUM_WEP_KEYS];
254
255 /* BSSID */
256 u8 bssid[ETH_ALEN];
257
258 /* A flag to indicate is HW crypto is enabled for this bssid */
259 bool is_hw_crypto_enabled;
245}; 260};
246#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) 261#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
262#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
247 263
248struct mwl8k_sta { 264struct mwl8k_sta {
249 /* Index into station database. Returned by UPDATE_STADB. */ 265 /* Index into station database. Returned by UPDATE_STADB. */
@@ -337,6 +353,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
337#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 353#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
338#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */ 354#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
339#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ 355#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
356#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
340#define MWL8K_CMD_UPDATE_STADB 0x1123 357#define MWL8K_CMD_UPDATE_STADB 0x1123
341 358
342static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) 359static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
@@ -375,6 +392,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
375 MWL8K_CMDNAME(SET_RATEADAPT_MODE); 392 MWL8K_CMDNAME(SET_RATEADAPT_MODE);
376 MWL8K_CMDNAME(BSS_START); 393 MWL8K_CMDNAME(BSS_START);
377 MWL8K_CMDNAME(SET_NEW_STN); 394 MWL8K_CMDNAME(SET_NEW_STN);
395 MWL8K_CMDNAME(UPDATE_ENCRYPTION);
378 MWL8K_CMDNAME(UPDATE_STADB); 396 MWL8K_CMDNAME(UPDATE_STADB);
379 default: 397 default:
380 snprintf(buf, bufsize, "0x%x", cmd); 398 snprintf(buf, bufsize, "0x%x", cmd);
@@ -715,10 +733,12 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
715 skb_pull(skb, sizeof(*tr) - hdrlen); 733 skb_pull(skb, sizeof(*tr) - hdrlen);
716} 734}
717 735
718static inline void mwl8k_add_dma_header(struct sk_buff *skb) 736static void
737mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
719{ 738{
720 struct ieee80211_hdr *wh; 739 struct ieee80211_hdr *wh;
721 int hdrlen; 740 int hdrlen;
741 int reqd_hdrlen;
722 struct mwl8k_dma_data *tr; 742 struct mwl8k_dma_data *tr;
723 743
724 /* 744 /*
@@ -730,11 +750,13 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
730 wh = (struct ieee80211_hdr *)skb->data; 750 wh = (struct ieee80211_hdr *)skb->data;
731 751
732 hdrlen = ieee80211_hdrlen(wh->frame_control); 752 hdrlen = ieee80211_hdrlen(wh->frame_control);
733 if (hdrlen != sizeof(*tr)) 753 reqd_hdrlen = sizeof(*tr);
734 skb_push(skb, sizeof(*tr) - hdrlen); 754
755 if (hdrlen != reqd_hdrlen)
756 skb_push(skb, reqd_hdrlen - hdrlen);
735 757
736 if (ieee80211_is_data_qos(wh->frame_control)) 758 if (ieee80211_is_data_qos(wh->frame_control))
737 hdrlen -= 2; 759 hdrlen -= IEEE80211_QOS_CTL_LEN;
738 760
739 tr = (struct mwl8k_dma_data *)skb->data; 761 tr = (struct mwl8k_dma_data *)skb->data;
740 if (wh != &tr->wh) 762 if (wh != &tr->wh)
@@ -747,9 +769,52 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
747 * payload". That is, everything except for the 802.11 header. 769 * payload". That is, everything except for the 802.11 header.
748 * This includes all crypto material including the MIC. 770 * This includes all crypto material including the MIC.
749 */ 771 */
750 tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr)); 772 tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
751} 773}
752 774
775static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
776{
777 struct ieee80211_hdr *wh;
778 struct ieee80211_tx_info *tx_info;
779 struct ieee80211_key_conf *key_conf;
780 int data_pad;
781
782 wh = (struct ieee80211_hdr *)skb->data;
783
784 tx_info = IEEE80211_SKB_CB(skb);
785
786 key_conf = NULL;
787 if (ieee80211_is_data(wh->frame_control))
788 key_conf = tx_info->control.hw_key;
789
790 /*
791 * Make sure the packet header is in the DMA header format (4-address
792 * without QoS), the necessary crypto padding between the header and the
793 * payload has already been provided by mac80211, but it doesn't add tail
794 * padding when HW crypto is enabled.
795 *
796 * We have the following trailer padding requirements:
797 * - WEP: 4 trailer bytes (ICV)
798 * - TKIP: 12 trailer bytes (8 MIC + 4 ICV)
799 * - CCMP: 8 trailer bytes (MIC)
800 */
801 data_pad = 0;
802 if (key_conf != NULL) {
803 switch (key_conf->cipher) {
804 case WLAN_CIPHER_SUITE_WEP40:
805 case WLAN_CIPHER_SUITE_WEP104:
806 data_pad = 4;
807 break;
808 case WLAN_CIPHER_SUITE_TKIP:
809 data_pad = 12;
810 break;
811 case WLAN_CIPHER_SUITE_CCMP:
812 data_pad = 8;
813 break;
814 }
815 }
816 mwl8k_add_dma_header(skb, data_pad);
817}
753 818
754/* 819/*
755 * Packet reception for 88w8366 AP firmware. 820 * Packet reception for 88w8366 AP firmware.
@@ -778,6 +843,13 @@ struct mwl8k_rxd_8366_ap {
778 843
779#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80 844#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
780 845
846/* 8366 AP rx_status bits */
847#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
848#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
849#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
850#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
851#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
852
781static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr) 853static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
782{ 854{
783 struct mwl8k_rxd_8366_ap *rxd = _rxd; 855 struct mwl8k_rxd_8366_ap *rxd = _rxd;
@@ -834,10 +906,16 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
834 } else { 906 } else {
835 status->band = IEEE80211_BAND_2GHZ; 907 status->band = IEEE80211_BAND_2GHZ;
836 } 908 }
837 status->freq = ieee80211_channel_to_frequency(rxd->channel); 909 status->freq = ieee80211_channel_to_frequency(rxd->channel,
910 status->band);
838 911
839 *qos = rxd->qos_control; 912 *qos = rxd->qos_control;
840 913
914 if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
915 (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
916 (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
917 status->flag |= RX_FLAG_MMIC_ERROR;
918
841 return le16_to_cpu(rxd->pkt_len); 919 return le16_to_cpu(rxd->pkt_len);
842} 920}
843 921
@@ -876,6 +954,11 @@ struct mwl8k_rxd_sta {
876#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001 954#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
877 955
878#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02 956#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
957#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04
958/* ICV=0 or MIC=1 */
959#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08
960/* Key is uploaded only in failure case */
961#define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30
879 962
880static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr) 963static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
881{ 964{
@@ -931,9 +1014,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
931 } else { 1014 } else {
932 status->band = IEEE80211_BAND_2GHZ; 1015 status->band = IEEE80211_BAND_2GHZ;
933 } 1016 }
934 status->freq = ieee80211_channel_to_frequency(rxd->channel); 1017 status->freq = ieee80211_channel_to_frequency(rxd->channel,
1018 status->band);
935 1019
936 *qos = rxd->qos_control; 1020 *qos = rxd->qos_control;
1021 if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) &&
1022 (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE))
1023 status->flag |= RX_FLAG_MMIC_ERROR;
937 1024
938 return le16_to_cpu(rxd->pkt_len); 1025 return le16_to_cpu(rxd->pkt_len);
939} 1026}
@@ -969,13 +1056,12 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
969 } 1056 }
970 memset(rxq->rxd, 0, size); 1057 memset(rxq->rxd, 0, size);
971 1058
972 rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); 1059 rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
973 if (rxq->buf == NULL) { 1060 if (rxq->buf == NULL) {
974 wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n"); 1061 wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
975 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); 1062 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
976 return -ENOMEM; 1063 return -ENOMEM;
977 } 1064 }
978 memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf));
979 1065
980 for (i = 0; i < MWL8K_RX_DESCS; i++) { 1066 for (i = 0; i < MWL8K_RX_DESCS; i++) {
981 int desc_size; 1067 int desc_size;
@@ -1092,9 +1178,25 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
1092 ieee80211_queue_work(hw, &priv->finalize_join_worker); 1178 ieee80211_queue_work(hw, &priv->finalize_join_worker);
1093} 1179}
1094 1180
1181static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list,
1182 u8 *bssid)
1183{
1184 struct mwl8k_vif *mwl8k_vif;
1185
1186 list_for_each_entry(mwl8k_vif,
1187 vif_list, list) {
1188 if (memcmp(bssid, mwl8k_vif->bssid,
1189 ETH_ALEN) == 0)
1190 return mwl8k_vif;
1191 }
1192
1193 return NULL;
1194}
1195
1095static int rxq_process(struct ieee80211_hw *hw, int index, int limit) 1196static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1096{ 1197{
1097 struct mwl8k_priv *priv = hw->priv; 1198 struct mwl8k_priv *priv = hw->priv;
1199 struct mwl8k_vif *mwl8k_vif = NULL;
1098 struct mwl8k_rx_queue *rxq = priv->rxq + index; 1200 struct mwl8k_rx_queue *rxq = priv->rxq + index;
1099 int processed; 1201 int processed;
1100 1202
@@ -1104,6 +1206,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1104 void *rxd; 1206 void *rxd;
1105 int pkt_len; 1207 int pkt_len;
1106 struct ieee80211_rx_status status; 1208 struct ieee80211_rx_status status;
1209 struct ieee80211_hdr *wh;
1107 __le16 qos; 1210 __le16 qos;
1108 1211
1109 skb = rxq->buf[rxq->head].skb; 1212 skb = rxq->buf[rxq->head].skb;
@@ -1130,8 +1233,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1130 1233
1131 rxq->rxd_count--; 1234 rxq->rxd_count--;
1132 1235
1133 skb_put(skb, pkt_len); 1236 wh = &((struct mwl8k_dma_data *)skb->data)->wh;
1134 mwl8k_remove_dma_header(skb, qos);
1135 1237
1136 /* 1238 /*
1137 * Check for a pending join operation. Save a 1239 * Check for a pending join operation. Save a
@@ -1141,6 +1243,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1141 if (mwl8k_capture_bssid(priv, (void *)skb->data)) 1243 if (mwl8k_capture_bssid(priv, (void *)skb->data))
1142 mwl8k_save_beacon(hw, skb); 1244 mwl8k_save_beacon(hw, skb);
1143 1245
1246 if (ieee80211_has_protected(wh->frame_control)) {
1247
1248 /* Check if hw crypto has been enabled for
1249 * this bss. If yes, set the status flags
1250 * accordingly
1251 */
1252 mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list,
1253 wh->addr1);
1254
1255 if (mwl8k_vif != NULL &&
1256 mwl8k_vif->is_hw_crypto_enabled == true) {
1257 /*
1258 * When MMIC ERROR is encountered
1259 * by the firmware, payload is
1260 * dropped and only 32 bytes of
1261 * mwl8k Firmware header is sent
1262 * to the host.
1263 *
1264 * We need to add four bytes of
1265 * key information. In it
1266 * MAC80211 expects keyidx set to
1267 * 0 for triggering Counter
1268 * Measure of MMIC failure.
1269 */
1270 if (status.flag & RX_FLAG_MMIC_ERROR) {
1271 struct mwl8k_dma_data *tr;
1272 tr = (struct mwl8k_dma_data *)skb->data;
1273 memset((void *)&(tr->data), 0, 4);
1274 pkt_len += 4;
1275 }
1276
1277 if (!ieee80211_is_auth(wh->frame_control))
1278 status.flag |= RX_FLAG_IV_STRIPPED |
1279 RX_FLAG_DECRYPTED |
1280 RX_FLAG_MMIC_STRIPPED;
1281 }
1282 }
1283
1284 skb_put(skb, pkt_len);
1285 mwl8k_remove_dma_header(skb, qos);
1144 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 1286 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
1145 ieee80211_rx_irqsafe(hw, skb); 1287 ieee80211_rx_irqsafe(hw, skb);
1146 1288
@@ -1204,13 +1346,12 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
1204 } 1346 }
1205 memset(txq->txd, 0, size); 1347 memset(txq->txd, 0, size);
1206 1348
1207 txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); 1349 txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
1208 if (txq->skb == NULL) { 1350 if (txq->skb == NULL) {
1209 wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n"); 1351 wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
1210 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); 1352 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
1211 return -ENOMEM; 1353 return -ENOMEM;
1212 } 1354 }
1213 memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
1214 1355
1215 for (i = 0; i < MWL8K_TX_DESCS; i++) { 1356 for (i = 0; i < MWL8K_TX_DESCS; i++) {
1216 struct mwl8k_tx_desc *tx_desc; 1357 struct mwl8k_tx_desc *tx_desc;
@@ -1392,6 +1533,13 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1392 1533
1393 info = IEEE80211_SKB_CB(skb); 1534 info = IEEE80211_SKB_CB(skb);
1394 ieee80211_tx_info_clear_status(info); 1535 ieee80211_tx_info_clear_status(info);
1536
1537 /* Rate control is happening in the firmware.
1538 * Ensure no tx rate is being reported.
1539 */
1540 info->status.rates[0].idx = -1;
1541 info->status.rates[0].count = 1;
1542
1395 if (MWL8K_TXD_SUCCESS(status)) 1543 if (MWL8K_TXD_SUCCESS(status))
1396 info->flags |= IEEE80211_TX_STAT_ACK; 1544 info->flags |= IEEE80211_TX_STAT_ACK;
1397 1545
@@ -1423,7 +1571,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1423 txq->txd = NULL; 1571 txq->txd = NULL;
1424} 1572}
1425 1573
1426static int 1574static void
1427mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) 1575mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1428{ 1576{
1429 struct mwl8k_priv *priv = hw->priv; 1577 struct mwl8k_priv *priv = hw->priv;
@@ -1443,7 +1591,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1443 else 1591 else
1444 qos = 0; 1592 qos = 0;
1445 1593
1446 mwl8k_add_dma_header(skb); 1594 if (priv->ap_fw)
1595 mwl8k_encapsulate_tx_frame(skb);
1596 else
1597 mwl8k_add_dma_header(skb, 0);
1598
1447 wh = &((struct mwl8k_dma_data *)skb->data)->wh; 1599 wh = &((struct mwl8k_dma_data *)skb->data)->wh;
1448 1600
1449 tx_info = IEEE80211_SKB_CB(skb); 1601 tx_info = IEEE80211_SKB_CB(skb);
@@ -1481,7 +1633,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1481 wiphy_debug(hw->wiphy, 1633 wiphy_debug(hw->wiphy,
1482 "failed to dma map skb, dropping TX frame.\n"); 1634 "failed to dma map skb, dropping TX frame.\n");
1483 dev_kfree_skb(skb); 1635 dev_kfree_skb(skb);
1484 return NETDEV_TX_OK; 1636 return;
1485 } 1637 }
1486 1638
1487 spin_lock_bh(&priv->tx_lock); 1639 spin_lock_bh(&priv->tx_lock);
@@ -1518,8 +1670,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1518 mwl8k_tx_start(priv); 1670 mwl8k_tx_start(priv);
1519 1671
1520 spin_unlock_bh(&priv->tx_lock); 1672 spin_unlock_bh(&priv->tx_lock);
1521
1522 return NETDEV_TX_OK;
1523} 1673}
1524 1674
1525 1675
@@ -1974,8 +2124,18 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1974 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); 2124 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1975 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); 2125 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
1976 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 2126 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1977 for (i = 0; i < MWL8K_TX_QUEUES; i++) 2127
1978 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); 2128 /*
2129 * Mac80211 stack has Q0 as highest priority and Q3 as lowest in
2130 * that order. Firmware has Q3 as highest priority and Q0 as lowest
2131 * in that order. Map Q3 of mac80211 to Q0 of firmware so that the
2132 * priority is interpreted the right way in firmware.
2133 */
2134 for (i = 0; i < MWL8K_TX_QUEUES; i++) {
2135 int j = MWL8K_TX_QUEUES - 1 - i;
2136 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
2137 }
2138
1979 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | 2139 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
1980 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | 2140 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
1981 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON); 2141 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
@@ -3099,6 +3259,274 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
3099} 3259}
3100 3260
3101/* 3261/*
3262 * CMD_UPDATE_ENCRYPTION.
3263 */
3264
3265#define MAX_ENCR_KEY_LENGTH 16
3266#define MIC_KEY_LENGTH 8
3267
3268struct mwl8k_cmd_update_encryption {
3269 struct mwl8k_cmd_pkt header;
3270
3271 __le32 action;
3272 __le32 reserved;
3273 __u8 mac_addr[6];
3274 __u8 encr_type;
3275
3276} __attribute__((packed));
3277
3278struct mwl8k_cmd_set_key {
3279 struct mwl8k_cmd_pkt header;
3280
3281 __le32 action;
3282 __le32 reserved;
3283 __le16 length;
3284 __le16 key_type_id;
3285 __le32 key_info;
3286 __le32 key_id;
3287 __le16 key_len;
3288 __u8 key_material[MAX_ENCR_KEY_LENGTH];
3289 __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
3290 __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
3291 __le16 tkip_rsc_low;
3292 __le32 tkip_rsc_high;
3293 __le16 tkip_tsc_low;
3294 __le32 tkip_tsc_high;
3295 __u8 mac_addr[6];
3296} __attribute__((packed));
3297
3298enum {
3299 MWL8K_ENCR_ENABLE,
3300 MWL8K_ENCR_SET_KEY,
3301 MWL8K_ENCR_REMOVE_KEY,
3302 MWL8K_ENCR_SET_GROUP_KEY,
3303};
3304
3305#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP 0
3306#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE 1
3307#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP 4
3308#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED 7
3309#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES 8
3310
3311enum {
3312 MWL8K_ALG_WEP,
3313 MWL8K_ALG_TKIP,
3314 MWL8K_ALG_CCMP,
3315};
3316
3317#define MWL8K_KEY_FLAG_TXGROUPKEY 0x00000004
3318#define MWL8K_KEY_FLAG_PAIRWISE 0x00000008
3319#define MWL8K_KEY_FLAG_TSC_VALID 0x00000040
3320#define MWL8K_KEY_FLAG_WEP_TXKEY 0x01000000
3321#define MWL8K_KEY_FLAG_MICKEY_VALID 0x02000000
3322
3323static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
3324 struct ieee80211_vif *vif,
3325 u8 *addr,
3326 u8 encr_type)
3327{
3328 struct mwl8k_cmd_update_encryption *cmd;
3329 int rc;
3330
3331 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3332 if (cmd == NULL)
3333 return -ENOMEM;
3334
3335 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
3336 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3337 cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
3338 memcpy(cmd->mac_addr, addr, ETH_ALEN);
3339 cmd->encr_type = encr_type;
3340
3341 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
3342 kfree(cmd);
3343
3344 return rc;
3345}
3346
3347static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
3348 u8 *addr,
3349 struct ieee80211_key_conf *key)
3350{
3351 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
3352 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3353 cmd->length = cpu_to_le16(sizeof(*cmd) -
3354 offsetof(struct mwl8k_cmd_set_key, length));
3355 cmd->key_id = cpu_to_le32(key->keyidx);
3356 cmd->key_len = cpu_to_le16(key->keylen);
3357 memcpy(cmd->mac_addr, addr, ETH_ALEN);
3358
3359 switch (key->cipher) {
3360 case WLAN_CIPHER_SUITE_WEP40:
3361 case WLAN_CIPHER_SUITE_WEP104:
3362 cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
3363 if (key->keyidx == 0)
3364 cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
3365
3366 break;
3367 case WLAN_CIPHER_SUITE_TKIP:
3368 cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
3369 cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
3370 ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
3371 : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
3372 cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
3373 | MWL8K_KEY_FLAG_TSC_VALID);
3374 break;
3375 case WLAN_CIPHER_SUITE_CCMP:
3376 cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
3377 cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
3378 ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
3379 : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
3380 break;
3381 default:
3382 return -ENOTSUPP;
3383 }
3384
3385 return 0;
3386}
3387
3388static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
3389 struct ieee80211_vif *vif,
3390 u8 *addr,
3391 struct ieee80211_key_conf *key)
3392{
3393 struct mwl8k_cmd_set_key *cmd;
3394 int rc;
3395 int keymlen;
3396 u32 action;
3397 u8 idx;
3398 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
3399
3400 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3401 if (cmd == NULL)
3402 return -ENOMEM;
3403
3404 rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
3405 if (rc < 0)
3406 goto done;
3407
3408 idx = key->keyidx;
3409
3410 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
3411 action = MWL8K_ENCR_SET_KEY;
3412 else
3413 action = MWL8K_ENCR_SET_GROUP_KEY;
3414
3415 switch (key->cipher) {
3416 case WLAN_CIPHER_SUITE_WEP40:
3417 case WLAN_CIPHER_SUITE_WEP104:
3418 if (!mwl8k_vif->wep_key_conf[idx].enabled) {
3419 memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
3420 sizeof(*key) + key->keylen);
3421 mwl8k_vif->wep_key_conf[idx].enabled = 1;
3422 }
3423
3424 keymlen = 0;
3425 action = MWL8K_ENCR_SET_KEY;
3426 break;
3427 case WLAN_CIPHER_SUITE_TKIP:
3428 keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
3429 break;
3430 case WLAN_CIPHER_SUITE_CCMP:
3431 keymlen = key->keylen;
3432 break;
3433 default:
3434 rc = -ENOTSUPP;
3435 goto done;
3436 }
3437
3438 memcpy(cmd->key_material, key->key, keymlen);
3439 cmd->action = cpu_to_le32(action);
3440
3441 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
3442done:
3443 kfree(cmd);
3444
3445 return rc;
3446}
3447
3448static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
3449 struct ieee80211_vif *vif,
3450 u8 *addr,
3451 struct ieee80211_key_conf *key)
3452{
3453 struct mwl8k_cmd_set_key *cmd;
3454 int rc;
3455 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
3456
3457 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3458 if (cmd == NULL)
3459 return -ENOMEM;
3460
3461 rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
3462 if (rc < 0)
3463 goto done;
3464
3465 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3466 WLAN_CIPHER_SUITE_WEP104)
3467 mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
3468
3469 cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
3470
3471 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
3472done:
3473 kfree(cmd);
3474
3475 return rc;
3476}
3477
3478static int mwl8k_set_key(struct ieee80211_hw *hw,
3479 enum set_key_cmd cmd_param,
3480 struct ieee80211_vif *vif,
3481 struct ieee80211_sta *sta,
3482 struct ieee80211_key_conf *key)
3483{
3484 int rc = 0;
3485 u8 encr_type;
3486 u8 *addr;
3487 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
3488
3489 if (vif->type == NL80211_IFTYPE_STATION)
3490 return -EOPNOTSUPP;
3491
3492 if (sta == NULL)
3493 addr = hw->wiphy->perm_addr;
3494 else
3495 addr = sta->addr;
3496
3497 if (cmd_param == SET_KEY) {
3498 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3499 rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
3500 if (rc)
3501 goto out;
3502
3503 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
3504 || (key->cipher == WLAN_CIPHER_SUITE_WEP104))
3505 encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
3506 else
3507 encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;
3508
3509 rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
3510 encr_type);
3511 if (rc)
3512 goto out;
3513
3514 mwl8k_vif->is_hw_crypto_enabled = true;
3515
3516 } else {
3517 rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);
3518
3519 if (rc)
3520 goto out;
3521
3522 mwl8k_vif->is_hw_crypto_enabled = false;
3523
3524 }
3525out:
3526 return rc;
3527}
3528
3529/*
3102 * CMD_UPDATE_STADB. 3530 * CMD_UPDATE_STADB.
3103 */ 3531 */
3104struct ewc_ht_info { 3532struct ewc_ht_info {
@@ -3310,22 +3738,19 @@ static void mwl8k_rx_poll(unsigned long data)
3310/* 3738/*
3311 * Core driver operations. 3739 * Core driver operations.
3312 */ 3740 */
3313static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3741static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3314{ 3742{
3315 struct mwl8k_priv *priv = hw->priv; 3743 struct mwl8k_priv *priv = hw->priv;
3316 int index = skb_get_queue_mapping(skb); 3744 int index = skb_get_queue_mapping(skb);
3317 int rc;
3318 3745
3319 if (!priv->radio_on) { 3746 if (!priv->radio_on) {
3320 wiphy_debug(hw->wiphy, 3747 wiphy_debug(hw->wiphy,
3321 "dropped TX frame since radio disabled\n"); 3748 "dropped TX frame since radio disabled\n");
3322 dev_kfree_skb(skb); 3749 dev_kfree_skb(skb);
3323 return NETDEV_TX_OK; 3750 return;
3324 } 3751 }
3325 3752
3326 rc = mwl8k_txq_xmit(hw, index, skb); 3753 mwl8k_txq_xmit(hw, index, skb);
3327
3328 return rc;
3329} 3754}
3330 3755
3331static int mwl8k_start(struct ieee80211_hw *hw) 3756static int mwl8k_start(struct ieee80211_hw *hw)
@@ -3469,6 +3894,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
3469 mwl8k_vif->vif = vif; 3894 mwl8k_vif->vif = vif;
3470 mwl8k_vif->macid = macid; 3895 mwl8k_vif->macid = macid;
3471 mwl8k_vif->seqno = 0; 3896 mwl8k_vif->seqno = 0;
3897 memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN);
3898 mwl8k_vif->is_hw_crypto_enabled = false;
3472 3899
3473 /* Set the mac address. */ 3900 /* Set the mac address. */
3474 mwl8k_cmd_set_mac_addr(hw, vif, vif->addr); 3901 mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
@@ -3528,9 +3955,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
3528 if (rc) 3955 if (rc)
3529 goto out; 3956 goto out;
3530 3957
3531 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7); 3958 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
3532 if (!rc) 3959 if (rc)
3533 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); 3960 wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
3961 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
3962 if (rc)
3963 wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
3964
3534 } else { 3965 } else {
3535 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level); 3966 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
3536 if (rc) 3967 if (rc)
@@ -3866,18 +4297,27 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
3866{ 4297{
3867 struct mwl8k_priv *priv = hw->priv; 4298 struct mwl8k_priv *priv = hw->priv;
3868 int ret; 4299 int ret;
4300 int i;
4301 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
4302 struct ieee80211_key_conf *key;
3869 4303
3870 if (!priv->ap_fw) { 4304 if (!priv->ap_fw) {
3871 ret = mwl8k_cmd_update_stadb_add(hw, vif, sta); 4305 ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
3872 if (ret >= 0) { 4306 if (ret >= 0) {
3873 MWL8K_STA(sta)->peer_id = ret; 4307 MWL8K_STA(sta)->peer_id = ret;
3874 return 0; 4308 ret = 0;
3875 } 4309 }
3876 4310
3877 return ret; 4311 } else {
4312 ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
3878 } 4313 }
3879 4314
3880 return mwl8k_cmd_set_new_stn_add(hw, vif, sta); 4315 for (i = 0; i < NUM_WEP_KEYS; i++) {
4316 key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
4317 if (mwl8k_vif->wep_key_conf[i].enabled)
4318 mwl8k_set_key(hw, SET_KEY, vif, sta, key);
4319 }
4320 return ret;
3881} 4321}
3882 4322
3883static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, 4323static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3894,12 +4334,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3894 if (!priv->wmm_enabled) 4334 if (!priv->wmm_enabled)
3895 rc = mwl8k_cmd_set_wmm_mode(hw, 1); 4335 rc = mwl8k_cmd_set_wmm_mode(hw, 1);
3896 4336
3897 if (!rc) 4337 if (!rc) {
3898 rc = mwl8k_cmd_set_edca_params(hw, queue, 4338 int q = MWL8K_TX_QUEUES - 1 - queue;
4339 rc = mwl8k_cmd_set_edca_params(hw, q,
3899 params->cw_min, 4340 params->cw_min,
3900 params->cw_max, 4341 params->cw_max,
3901 params->aifs, 4342 params->aifs,
3902 params->txop); 4343 params->txop);
4344 }
3903 4345
3904 mwl8k_fw_unlock(hw); 4346 mwl8k_fw_unlock(hw);
3905 } 4347 }
@@ -3932,7 +4374,8 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
3932static int 4374static int
3933mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4375mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3934 enum ieee80211_ampdu_mlme_action action, 4376 enum ieee80211_ampdu_mlme_action action,
3935 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 4377 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4378 u8 buf_size)
3936{ 4379{
3937 switch (action) { 4380 switch (action) {
3938 case IEEE80211_AMPDU_RX_START: 4381 case IEEE80211_AMPDU_RX_START:
@@ -3955,6 +4398,7 @@ static const struct ieee80211_ops mwl8k_ops = {
3955 .bss_info_changed = mwl8k_bss_info_changed, 4398 .bss_info_changed = mwl8k_bss_info_changed,
3956 .prepare_multicast = mwl8k_prepare_multicast, 4399 .prepare_multicast = mwl8k_prepare_multicast,
3957 .configure_filter = mwl8k_configure_filter, 4400 .configure_filter = mwl8k_configure_filter,
4401 .set_key = mwl8k_set_key,
3958 .set_rts_threshold = mwl8k_set_rts_threshold, 4402 .set_rts_threshold = mwl8k_set_rts_threshold,
3959 .sta_add = mwl8k_sta_add, 4403 .sta_add = mwl8k_sta_add,
3960 .sta_remove = mwl8k_sta_remove, 4404 .sta_remove = mwl8k_sta_remove,
@@ -4332,7 +4776,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
4332 hw->queues = MWL8K_TX_QUEUES; 4776 hw->queues = MWL8K_TX_QUEUES;
4333 4777
4334 /* Set rssi values to dBm */ 4778 /* Set rssi values to dBm */
4335 hw->flags |= IEEE80211_HW_SIGNAL_DBM; 4779 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
4336 hw->vif_data_size = sizeof(struct mwl8k_vif); 4780 hw->vif_data_size = sizeof(struct mwl8k_vif);
4337 hw->sta_data_size = sizeof(struct mwl8k_sta); 4781 hw->sta_data_size = sizeof(struct mwl8k_sta);
4338 4782
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 86cb54c842e7..e99ca1c1e0d8 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -111,6 +111,11 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
111 111
112 freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel)); 112 freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
113 channel = ieee80211_get_channel(wiphy, freq); 113 channel = ieee80211_get_channel(wiphy, freq);
114 if (!channel) {
115 printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
116 bss->a.channel, freq);
117 return; /* Then ignore it for now */
118 }
114 timestamp = 0; 119 timestamp = 0;
115 capability = le16_to_cpu(bss->a.capabilities); 120 capability = le16_to_cpu(bss->a.capabilities);
116 beacon_interval = le16_to_cpu(bss->a.beacon_interv); 121 beacon_interval = le16_to_cpu(bss->a.beacon_interv);
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index 25f965ffc889..0ec55b50798e 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -43,9 +43,8 @@ config P54_SPI
43 tristate "Prism54 SPI (stlc45xx) support" 43 tristate "Prism54 SPI (stlc45xx) support"
44 depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS 44 depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
45 ---help--- 45 ---help---
46 This driver is for stlc4550 or stlc4560 based wireless chips. 46 This driver is for stlc4550 or stlc4560 based wireless chips
47 This driver is experimental, untested and will probably only work on 47 such as Nokia's N800/N810 Portable Internet Tablet.
48 Nokia's N800/N810 Portable Internet Tablet.
49 48
50 If you choose to build a module, it'll be called p54spi. 49 If you choose to build a module, it'll be called p54spi.
51 50
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 35b09aa0529b..13d750da9301 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -55,6 +55,17 @@ static struct ieee80211_rate p54_arates[] = {
55 { .bitrate = 540, .hw_value = 11, }, 55 { .bitrate = 540, .hw_value = 11, },
56}; 56};
57 57
58static struct p54_rssi_db_entry p54_rssi_default = {
59 /*
60 * The defaults are taken from usb-logs of the
61 * vendor driver. So, they should be safe to
62 * use in case we can't get a match from the
63 * rssi <-> dBm conversion database.
64 */
65 .mul = 130,
66 .add = -398,
67};
68
58#define CHAN_HAS_CAL BIT(0) 69#define CHAN_HAS_CAL BIT(0)
59#define CHAN_HAS_LIMIT BIT(1) 70#define CHAN_HAS_LIMIT BIT(1)
60#define CHAN_HAS_CURVE BIT(2) 71#define CHAN_HAS_CURVE BIT(2)
@@ -87,13 +98,27 @@ static int p54_get_band_from_freq(u16 freq)
87 return -1; 98 return -1;
88} 99}
89 100
101static int same_band(u16 freq, u16 freq2)
102{
103 return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2);
104}
105
90static int p54_compare_channels(const void *_a, 106static int p54_compare_channels(const void *_a,
91 const void *_b) 107 const void *_b)
92{ 108{
93 const struct p54_channel_entry *a = _a; 109 const struct p54_channel_entry *a = _a;
94 const struct p54_channel_entry *b = _b; 110 const struct p54_channel_entry *b = _b;
95 111
96 return a->index - b->index; 112 return a->freq - b->freq;
113}
114
115static int p54_compare_rssichan(const void *_a,
116 const void *_b)
117{
118 const struct p54_rssi_db_entry *a = _a;
119 const struct p54_rssi_db_entry *b = _b;
120
121 return a->freq - b->freq;
97} 122}
98 123
99static int p54_fill_band_bitrates(struct ieee80211_hw *dev, 124static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
@@ -145,25 +170,26 @@ static int p54_generate_band(struct ieee80211_hw *dev,
145 170
146 for (i = 0, j = 0; (j < list->band_channel_num[band]) && 171 for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
147 (i < list->entries); i++) { 172 (i < list->entries); i++) {
173 struct p54_channel_entry *chan = &list->channels[i];
148 174
149 if (list->channels[i].band != band) 175 if (chan->band != band)
150 continue; 176 continue;
151 177
152 if (list->channels[i].data != CHAN_HAS_ALL) { 178 if (chan->data != CHAN_HAS_ALL) {
153 wiphy_err(dev->wiphy, 179 wiphy_err(dev->wiphy, "%s%s%s is/are missing for "
154 "%s%s%s is/are missing for channel:%d [%d MHz].\n", 180 "channel:%d [%d MHz].\n",
155 (list->channels[i].data & CHAN_HAS_CAL ? "" : 181 (chan->data & CHAN_HAS_CAL ? "" :
156 " [iqauto calibration data]"), 182 " [iqauto calibration data]"),
157 (list->channels[i].data & CHAN_HAS_LIMIT ? "" : 183 (chan->data & CHAN_HAS_LIMIT ? "" :
158 " [output power limits]"), 184 " [output power limits]"),
159 (list->channels[i].data & CHAN_HAS_CURVE ? "" : 185 (chan->data & CHAN_HAS_CURVE ? "" :
160 " [curve data]"), 186 " [curve data]"),
161 list->channels[i].index, list->channels[i].freq); 187 chan->index, chan->freq);
162 continue; 188 continue;
163 } 189 }
164 190
165 tmp->channels[j].band = list->channels[i].band; 191 tmp->channels[j].band = chan->band;
166 tmp->channels[j].center_freq = list->channels[i].freq; 192 tmp->channels[j].center_freq = chan->freq;
167 j++; 193 j++;
168 } 194 }
169 195
@@ -291,7 +317,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
291 } 317 }
292 } 318 }
293 319
294 /* sort the list by the channel index */ 320 /* sort the channel list by frequency */
295 sort(list->channels, list->entries, sizeof(struct p54_channel_entry), 321 sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
296 p54_compare_channels, NULL); 322 p54_compare_channels, NULL);
297 323
@@ -410,33 +436,121 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
410static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2", 436static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2",
411 "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" }; 437 "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" };
412 438
413static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len, 439static int p54_parse_rssical(struct ieee80211_hw *dev,
414 u16 type) 440 u8 *data, int len, u16 type)
415{ 441{
416 struct p54_common *priv = dev->priv; 442 struct p54_common *priv = dev->priv;
417 int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0; 443 struct p54_rssi_db_entry *entry;
418 int entry_size = sizeof(struct pda_rssi_cal_entry) + offset; 444 size_t db_len, entries;
419 int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2; 445 int offset = 0, i;
420 int i; 446
447 if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
448 entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
449 if (len != sizeof(struct pda_rssi_cal_entry) * entries) {
450 wiphy_err(dev->wiphy, "rssical size mismatch.\n");
451 goto err_data;
452 }
453 } else {
454 /*
455 * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...)
456 * have an empty two byte header.
457 */
458 if (*((__le16 *)&data[offset]) == cpu_to_le16(0))
459 offset += 2;
421 460
422 if (len != (entry_size * num_entries)) { 461 entries = (len - offset) /
423 wiphy_err(dev->wiphy, 462 sizeof(struct pda_rssi_cal_ext_entry);
424 "unknown rssi calibration data packing type:(%x) len:%d.\n",
425 type, len);
426 463
427 print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, 464 if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) ||
428 data, len); 465 entries <= 0) {
466 wiphy_err(dev->wiphy, "invalid rssi database.\n");
467 goto err_data;
468 }
469 }
429 470
430 wiphy_err(dev->wiphy, "please report this issue.\n"); 471 db_len = sizeof(*entry) * entries;
431 return; 472 priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL);
473 if (!priv->rssi_db)
474 return -ENOMEM;
475
476 priv->rssi_db->offset = 0;
477 priv->rssi_db->entries = entries;
478 priv->rssi_db->entry_size = sizeof(*entry);
479 priv->rssi_db->len = db_len;
480
481 entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset);
482 if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
483 struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset];
484
485 for (i = 0; i < entries; i++) {
486 entry[i].freq = le16_to_cpu(cal[i].freq);
487 entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
488 entry[i].add = (s16) le16_to_cpu(cal[i].add);
489 }
490 } else {
491 struct pda_rssi_cal_entry *cal = (void *) &data[offset];
492
493 for (i = 0; i < entries; i++) {
494 u16 freq;
495 switch (i) {
496 case IEEE80211_BAND_2GHZ:
497 freq = 2437;
498 break;
499 case IEEE80211_BAND_5GHZ:
500 freq = 5240;
501 break;
502 }
503
504 entry[i].freq = freq;
505 entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
506 entry[i].add = (s16) le16_to_cpu(cal[i].add);
507 }
432 } 508 }
433 509
434 for (i = 0; i < num_entries; i++) { 510 /* sort the list by channel frequency */
435 struct pda_rssi_cal_entry *cal = data + 511 sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL);
436 (offset + i * entry_size); 512 return 0;
437 priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul); 513
438 priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add); 514err_data:
515 wiphy_err(dev->wiphy,
516 "rssi calibration data packing type:(%x) len:%d.\n",
517 type, len);
518
519 print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len);
520
521 wiphy_err(dev->wiphy, "please report this issue.\n");
522 return -EINVAL;
523}
524
525struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq)
526{
527 struct p54_rssi_db_entry *entry;
528 int i, found = -1;
529
530 if (!priv->rssi_db)
531 return &p54_rssi_default;
532
533 entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset);
534 for (i = 0; i < priv->rssi_db->entries; i++) {
535 if (!same_band(freq, entry[i].freq))
536 continue;
537
538 if (found == -1) {
539 found = i;
540 continue;
541 }
542
543 /* nearest match */
544 if (abs(freq - entry[i].freq) <
545 abs(freq - entry[found].freq)) {
546 found = i;
547 continue;
548 } else {
549 break;
550 }
439 } 551 }
552
553 return found < 0 ? &p54_rssi_default : &entry[found];
440} 554}
441 555
442static void p54_parse_default_country(struct ieee80211_hw *dev, 556static void p54_parse_default_country(struct ieee80211_hw *dev,
@@ -627,21 +741,30 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
627 case PDR_RSSI_LINEAR_APPROXIMATION: 741 case PDR_RSSI_LINEAR_APPROXIMATION:
628 case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND: 742 case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND:
629 case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED: 743 case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED:
630 p54_parse_rssical(dev, entry->data, data_len, 744 err = p54_parse_rssical(dev, entry->data, data_len,
631 le16_to_cpu(entry->code)); 745 le16_to_cpu(entry->code));
746 if (err)
747 goto err;
632 break; 748 break;
633 case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: { 749 case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: {
634 __le16 *src = (void *) entry->data; 750 struct pda_custom_wrapper *pda = (void *) entry->data;
635 s16 *dst = (void *) &priv->rssical_db; 751 __le16 *src;
752 u16 *dst;
636 int i; 753 int i;
637 754
638 if (data_len != sizeof(priv->rssical_db)) { 755 if (priv->rssi_db || data_len < sizeof(*pda))
639 err = -EINVAL; 756 break;
640 goto err; 757
641 } 758 priv->rssi_db = p54_convert_db(pda, data_len);
642 for (i = 0; i < sizeof(priv->rssical_db) / 759 if (!priv->rssi_db)
643 sizeof(*src); i++) 760 break;
761
762 src = (void *) priv->rssi_db->data;
763 dst = (void *) priv->rssi_db->data;
764
765 for (i = 0; i < priv->rssi_db->entries; i++)
644 *(dst++) = (s16) le16_to_cpu(*(src++)); 766 *(dst++) = (s16) le16_to_cpu(*(src++));
767
645 } 768 }
646 break; 769 break;
647 case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: { 770 case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: {
@@ -717,6 +840,8 @@ good_eeprom:
717 SET_IEEE80211_PERM_ADDR(dev, perm_addr); 840 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
718 } 841 }
719 842
843 priv->cur_rssi = &p54_rssi_default;
844
720 wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n", 845 wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
721 dev->wiphy->perm_addr, priv->version, 846 dev->wiphy->perm_addr, priv->version,
722 p54_rf_chips[priv->rxhw]); 847 p54_rf_chips[priv->rxhw]);
@@ -727,9 +852,11 @@ err:
727 kfree(priv->iq_autocal); 852 kfree(priv->iq_autocal);
728 kfree(priv->output_limit); 853 kfree(priv->output_limit);
729 kfree(priv->curve_data); 854 kfree(priv->curve_data);
855 kfree(priv->rssi_db);
730 priv->iq_autocal = NULL; 856 priv->iq_autocal = NULL;
731 priv->output_limit = NULL; 857 priv->output_limit = NULL;
732 priv->curve_data = NULL; 858 priv->curve_data = NULL;
859 priv->rssi_db = NULL;
733 860
734 wiphy_err(dev->wiphy, "eeprom parse failed!\n"); 861 wiphy_err(dev->wiphy, "eeprom parse failed!\n");
735 return err; 862 return err;
diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h
index 9051aef11249..afde72b84606 100644
--- a/drivers/net/wireless/p54/eeprom.h
+++ b/drivers/net/wireless/p54/eeprom.h
@@ -81,6 +81,12 @@ struct pda_pa_curve_data {
81 u8 data[0]; 81 u8 data[0];
82} __packed; 82} __packed;
83 83
84struct pda_rssi_cal_ext_entry {
85 __le16 freq;
86 __le16 mul;
87 __le16 add;
88} __packed;
89
84struct pda_rssi_cal_entry { 90struct pda_rssi_cal_entry {
85 __le16 mul; 91 __le16 mul;
86 __le16 add; 92 __le16 add;
@@ -179,6 +185,7 @@ struct pda_custom_wrapper {
179 185
180/* used by our modificated eeprom image */ 186/* used by our modificated eeprom image */
181#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD 187#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD
188#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 0xCAFF
182#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF 189#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF
183#define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D 190#define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D
184 191
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 92b9b1f05fd5..2fab7d20ffc2 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -397,9 +397,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
397 union p54_scan_body_union *body; 397 union p54_scan_body_union *body;
398 struct p54_scan_tail_rate *rate; 398 struct p54_scan_tail_rate *rate;
399 struct pda_rssi_cal_entry *rssi; 399 struct pda_rssi_cal_entry *rssi;
400 struct p54_rssi_db_entry *rssi_data;
400 unsigned int i; 401 unsigned int i;
401 void *entry; 402 void *entry;
402 int band = priv->hw->conf.channel->band;
403 __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq); 403 __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq);
404 404
405 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) + 405 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
@@ -503,13 +503,14 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
503 } 503 }
504 504
505 rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi)); 505 rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi));
506 rssi->mul = cpu_to_le16(priv->rssical_db[band].mul); 506 rssi_data = p54_rssi_find(priv, le16_to_cpu(freq));
507 rssi->add = cpu_to_le16(priv->rssical_db[band].add); 507 rssi->mul = cpu_to_le16(rssi_data->mul);
508 rssi->add = cpu_to_le16(rssi_data->add);
508 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { 509 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
509 /* Longbow frontend needs ever more */ 510 /* Longbow frontend needs ever more */
510 rssi = (void *) skb_put(skb, sizeof(*rssi)); 511 rssi = (void *) skb_put(skb, sizeof(*rssi));
511 rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn); 512 rssi->mul = cpu_to_le16(rssi_data->longbow_unkn);
512 rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2); 513 rssi->add = cpu_to_le16(rssi_data->longbow_unk2);
513 } 514 }
514 515
515 if (priv->fw_var >= 0x509) { 516 if (priv->fw_var >= 0x509) {
@@ -523,6 +524,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
523 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); 524 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
524 525
525 p54_tx(priv, skb); 526 p54_tx(priv, skb);
527 priv->cur_rssi = rssi_data;
526 return 0; 528 return 0;
527 529
528err: 530err:
@@ -557,6 +559,7 @@ int p54_set_edcf(struct p54_common *priv)
557{ 559{
558 struct sk_buff *skb; 560 struct sk_buff *skb;
559 struct p54_edcf *edcf; 561 struct p54_edcf *edcf;
562 u8 rtd;
560 563
561 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf), 564 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
562 P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC); 565 P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC);
@@ -573,9 +576,15 @@ int p54_set_edcf(struct p54_common *priv)
573 edcf->sifs = 0x0a; 576 edcf->sifs = 0x0a;
574 edcf->eofpad = 0x06; 577 edcf->eofpad = 0x06;
575 } 578 }
579 /*
580 * calculate the extra round trip delay according to the
581 * formula from 802.11-2007 17.3.8.6.
582 */
583 rtd = 3 * priv->coverage_class;
584 edcf->slottime += rtd;
585 edcf->round_trip_delay = cpu_to_le16(rtd);
576 /* (see prism54/isl_oid.h for further details) */ 586 /* (see prism54/isl_oid.h for further details) */
577 edcf->frameburst = cpu_to_le16(0); 587 edcf->frameburst = cpu_to_le16(0);
578 edcf->round_trip_delay = cpu_to_le16(0);
579 edcf->flags = 0; 588 edcf->flags = 0;
580 memset(edcf->mapping, 0, sizeof(edcf->mapping)); 589 memset(edcf->mapping, 0, sizeof(edcf->mapping));
581 memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue)); 590 memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 04b63ec80fa4..eb581abc1079 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,7 @@ int p54_init_leds(struct p54_common *priv);
526void p54_unregister_leds(struct p54_common *priv); 526void p54_unregister_leds(struct p54_common *priv);
527 527
528/* xmit functions */ 528/* xmit functions */
529int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb); 529void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
530int p54_tx_cancel(struct p54_common *priv, __le32 req_id); 530int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
531void p54_tx(struct p54_common *priv, struct sk_buff *skb); 531void p54_tx(struct p54_common *priv, struct sk_buff *skb);
532 532
@@ -551,6 +551,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot,
551/* eeprom */ 551/* eeprom */
552int p54_download_eeprom(struct p54_common *priv, void *buf, 552int p54_download_eeprom(struct p54_common *priv, void *buf,
553 u16 offset, u16 len); 553 u16 offset, u16 len);
554struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *p, const u16 freq);
554 555
555/* utility */ 556/* utility */
556u8 *p54_find_ie(struct sk_buff *skb, u8 ie); 557u8 *p54_find_ie(struct sk_buff *skb, u8 ie);
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 622d27b6d8f2..356e6bb443a6 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -157,7 +157,7 @@ static int p54_beacon_update(struct p54_common *priv,
157 * to cancel the old beacon template by hand, instead the firmware 157 * to cancel the old beacon template by hand, instead the firmware
158 * will release the previous one through the feedback mechanism. 158 * will release the previous one through the feedback mechanism.
159 */ 159 */
160 WARN_ON(p54_tx_80211(priv->hw, beacon)); 160 p54_tx_80211(priv->hw, beacon);
161 priv->tsf_high32 = 0; 161 priv->tsf_high32 = 0;
162 priv->tsf_low32 = 0; 162 priv->tsf_low32 = 0;
163 163
@@ -524,6 +524,59 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
524 return 0; 524 return 0;
525} 525}
526 526
527static unsigned int p54_flush_count(struct p54_common *priv)
528{
529 unsigned int total = 0, i;
530
531 BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats));
532
533 /*
534 * Because the firmware has the sole control over any frames
535 * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they
536 * don't really count as pending or active.
537 */
538 for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++)
539 total += priv->tx_stats[i].len;
540 return total;
541}
542
543static void p54_flush(struct ieee80211_hw *dev, bool drop)
544{
545 struct p54_common *priv = dev->priv;
546 unsigned int total, i;
547
548 /*
549 * Currently, it wouldn't really matter if we wait for one second
550 * or 15 minutes. But once someone gets around and completes the
551 * TODOs [ancel stuck frames / reset device] in p54_work, it will
552 * suddenly make sense to wait that long.
553 */
554 i = P54_STATISTICS_UPDATE * 2 / 20;
555
556 /*
557 * In this case no locking is required because as we speak the
558 * queues have already been stopped and no new frames can sneak
559 * up from behind.
560 */
561 while ((total = p54_flush_count(priv) && i--)) {
562 /* waste time */
563 msleep(20);
564 }
565
566 WARN(total, "tx flush timeout, unresponsive firmware");
567}
568
569static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class)
570{
571 struct p54_common *priv = dev->priv;
572
573 mutex_lock(&priv->conf_mutex);
574 /* support all coverage class values as in 802.11-2007 Table 7-27 */
575 priv->coverage_class = clamp_t(u8, coverage_class, 0, 31);
576 p54_set_edcf(priv);
577 mutex_unlock(&priv->conf_mutex);
578}
579
527static const struct ieee80211_ops p54_ops = { 580static const struct ieee80211_ops p54_ops = {
528 .tx = p54_tx_80211, 581 .tx = p54_tx_80211,
529 .start = p54_start, 582 .start = p54_start,
@@ -536,11 +589,13 @@ static const struct ieee80211_ops p54_ops = {
536 .sta_remove = p54_sta_add_remove, 589 .sta_remove = p54_sta_add_remove,
537 .set_key = p54_set_key, 590 .set_key = p54_set_key,
538 .config = p54_config, 591 .config = p54_config,
592 .flush = p54_flush,
539 .bss_info_changed = p54_bss_info_changed, 593 .bss_info_changed = p54_bss_info_changed,
540 .configure_filter = p54_configure_filter, 594 .configure_filter = p54_configure_filter,
541 .conf_tx = p54_conf_tx, 595 .conf_tx = p54_conf_tx,
542 .get_stats = p54_get_stats, 596 .get_stats = p54_get_stats,
543 .get_survey = p54_get_survey, 597 .get_survey = p54_get_survey,
598 .set_coverage_class = p54_set_coverage_class,
544}; 599};
545 600
546struct ieee80211_hw *p54_init_common(size_t priv_data_len) 601struct ieee80211_hw *p54_init_common(size_t priv_data_len)
@@ -611,7 +666,7 @@ EXPORT_SYMBOL_GPL(p54_init_common);
611 666
612int p54_register_common(struct ieee80211_hw *dev, struct device *pdev) 667int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
613{ 668{
614 struct p54_common *priv = dev->priv; 669 struct p54_common __maybe_unused *priv = dev->priv;
615 int err; 670 int err;
616 671
617 err = ieee80211_register_hw(dev); 672 err = ieee80211_register_hw(dev);
@@ -642,10 +697,12 @@ void p54_free_common(struct ieee80211_hw *dev)
642 kfree(priv->iq_autocal); 697 kfree(priv->iq_autocal);
643 kfree(priv->output_limit); 698 kfree(priv->output_limit);
644 kfree(priv->curve_data); 699 kfree(priv->curve_data);
700 kfree(priv->rssi_db);
645 kfree(priv->used_rxkeys); 701 kfree(priv->used_rxkeys);
646 priv->iq_autocal = NULL; 702 priv->iq_autocal = NULL;
647 priv->output_limit = NULL; 703 priv->output_limit = NULL;
648 priv->curve_data = NULL; 704 priv->curve_data = NULL;
705 priv->rssi_db = NULL;
649 priv->used_rxkeys = NULL; 706 priv->used_rxkeys = NULL;
650 ieee80211_free_hw(dev); 707 ieee80211_free_hw(dev);
651} 708}
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 43a3b2ead81a..50730fc23fe5 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -116,7 +116,8 @@ struct p54_edcf_queue_param {
116 __le16 txop; 116 __le16 txop;
117} __packed; 117} __packed;
118 118
119struct p54_rssi_linear_approximation { 119struct p54_rssi_db_entry {
120 u16 freq;
120 s16 mul; 121 s16 mul;
121 s16 add; 122 s16 add;
122 s16 longbow_unkn; 123 s16 longbow_unkn;
@@ -197,13 +198,14 @@ struct p54_common {
197 u8 rx_diversity_mask; 198 u8 rx_diversity_mask;
198 u8 tx_diversity_mask; 199 u8 tx_diversity_mask;
199 unsigned int output_power; 200 unsigned int output_power;
201 struct p54_rssi_db_entry *cur_rssi;
200 int noise; 202 int noise;
201 /* calibration, output power limit and rssi<->dBm conversation data */ 203 /* calibration, output power limit and rssi<->dBm conversation data */
202 struct pda_iq_autocal_entry *iq_autocal; 204 struct pda_iq_autocal_entry *iq_autocal;
203 unsigned int iq_autocal_len; 205 unsigned int iq_autocal_len;
204 struct p54_cal_database *curve_data; 206 struct p54_cal_database *curve_data;
205 struct p54_cal_database *output_limit; 207 struct p54_cal_database *output_limit;
206 struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS]; 208 struct p54_cal_database *rssi_db;
207 struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS]; 209 struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
208 210
209 /* BBP/MAC state */ 211 /* BBP/MAC state */
@@ -215,6 +217,7 @@ struct p54_common {
215 u32 tsf_low32, tsf_high32; 217 u32 tsf_low32, tsf_high32;
216 u32 basic_rate_mask; 218 u32 basic_rate_mask;
217 u16 aid; 219 u16 aid;
220 u8 coverage_class;
218 bool powersave_override; 221 bool powersave_override;
219 __le32 beacon_req_id; 222 __le32 beacon_req_id;
220 struct completion beacon_comp; 223 struct completion beacon_comp;
diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h
index d592cbd34d78..0b7bfb0adcf2 100644
--- a/drivers/net/wireless/p54/p54spi_eeprom.h
+++ b/drivers/net/wireless/p54/p54spi_eeprom.h
@@ -65,9 +65,10 @@ static unsigned char p54spi_eeprom[] = {
650x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */ 650x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */
66 0x08, 0x08, 0x08, 0x08, 66 0x08, 0x08, 0x08, 0x08,
67 67
680x09, 0x00, 0xad, 0xde, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */ 680x0a, 0x00, 0xff, 0xca, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 */
69 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 69 0x01, 0x00, 0x0a, 0x00,
70 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 70 0x00, 0x00, 0x0a, 0x00,
71 0x85, 0x09, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
71 72
72/* struct pda_custom_wrapper */ 73/* struct pda_custom_wrapper */
730x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */ 740x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */
@@ -671,7 +672,7 @@ static unsigned char p54spi_eeprom[] = {
671 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, 672 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
672 673
6730x02, 0x00, 0x00, 0x00, /* PDR_END */ 6740x02, 0x00, 0x00, 0x00, /* PDR_END */
674 0x67, 0x99, 675 0xb6, 0x04,
675}; 676};
676 677
677#endif /* P54SPI_EEPROM_H */ 678#endif /* P54SPI_EEPROM_H */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f618b9623e5a..7834c26c2954 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -273,11 +273,9 @@ void p54_tx(struct p54_common *priv, struct sk_buff *skb)
273 273
274static int p54_rssi_to_dbm(struct p54_common *priv, int rssi) 274static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
275{ 275{
276 int band = priv->hw->conf.channel->band;
277
278 if (priv->rxhw != 5) { 276 if (priv->rxhw != 5) {
279 return ((rssi * priv->rssical_db[band].mul) / 64 + 277 return ((rssi * priv->cur_rssi->mul) / 64 +
280 priv->rssical_db[band].add) / 4; 278 priv->cur_rssi->add) / 4;
281 } else { 279 } else {
282 /* 280 /*
283 * TODO: find the correct formula 281 * TODO: find the correct formula
@@ -369,7 +367,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
369 rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32; 367 rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
370 priv->tsf_low32 = tsf32; 368 priv->tsf_low32 = tsf32;
371 369
372 rx_status->flag |= RX_FLAG_TSFT; 370 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
373 371
374 if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) 372 if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
375 header_len += hdr->align[0]; 373 header_len += hdr->align[0];
@@ -698,7 +696,7 @@ static u8 p54_convert_algo(u32 cipher)
698 } 696 }
699} 697}
700 698
701int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) 699void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
702{ 700{
703 struct p54_common *priv = dev->priv; 701 struct p54_common *priv = dev->priv;
704 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 702 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -719,12 +717,8 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
719 &hdr_flags, &aid, &burst_allowed); 717 &hdr_flags, &aid, &burst_allowed);
720 718
721 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { 719 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
722 if (!IS_QOS_QUEUE(queue)) { 720 dev_kfree_skb_any(skb);
723 dev_kfree_skb_any(skb); 721 return;
724 return NETDEV_TX_OK;
725 } else {
726 return NETDEV_TX_BUSY;
727 }
728 } 722 }
729 723
730 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; 724 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
@@ -867,5 +861,4 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
867 p54info->extra_len = extra_len; 861 p54info->extra_len = extra_len;
868 862
869 p54_tx(priv, skb); 863 p54_tx(priv, skb);
870 return NETDEV_TX_OK;
871} 864}
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 6f383cd684b0..f630552427b7 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -97,6 +97,18 @@ config RT2800PCI_RT35XX
97 Support for these devices is non-functional at the moment and is 97 Support for these devices is non-functional at the moment and is
98 intended for testers and developers. 98 intended for testers and developers.
99 99
100config RT2800PCI_RT53XX
101 bool "rt2800-pci - Include support for rt53xx devices (EXPERIMENTAL)"
102 depends on EXPERIMENTAL
103 default n
104 ---help---
105 This adds support for rt53xx wireless chipset family to the
106 rt2800pci driver.
107 Supported chips: RT5390
108
109 Support for these devices is non-functional at the moment and is
110 intended for testers and developers.
111
100endif 112endif
101 113
102config RT2500USB 114config RT2500USB
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 54ca49ad3472..329f3283697b 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -46,7 +46,7 @@
46 * These indirect registers work with busy bits, 46 * These indirect registers work with busy bits,
47 * and we will try maximal REGISTER_BUSY_COUNT times to access 47 * and we will try maximal REGISTER_BUSY_COUNT times to access
48 * the register while taking a REGISTER_BUSY_DELAY us delay 48 * the register while taking a REGISTER_BUSY_DELAY us delay
49 * between each attampt. When the busy bit is still set at that time, 49 * between each attempt. When the busy bit is still set at that time,
50 * the access attempt is considered to have failed, 50 * the access attempt is considered to have failed,
51 * and we will print an error. 51 * and we will print an error.
52 */ 52 */
@@ -305,9 +305,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
305 * Enable synchronisation. 305 * Enable synchronisation.
306 */ 306 */
307 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 307 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
308 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
309 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync); 308 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
310 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
311 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 309 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
312 } 310 }
313 311
@@ -647,6 +645,11 @@ static void rt2400pci_start_queue(struct data_queue *queue)
647 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 645 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
648 break; 646 break;
649 case QID_BEACON: 647 case QID_BEACON:
648 /*
649 * Allow the tbtt tasklet to be scheduled.
650 */
651 tasklet_enable(&rt2x00dev->tbtt_tasklet);
652
650 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 653 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
651 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 654 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
652 rt2x00_set_field32(&reg, CSR14_TBCN, 1); 655 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -708,6 +711,11 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
708 rt2x00_set_field32(&reg, CSR14_TBCN, 0); 711 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
709 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 712 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
710 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 713 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
714
715 /*
716 * Wait for possibly running tbtt tasklets.
717 */
718 tasklet_disable(&rt2x00dev->tbtt_tasklet);
711 break; 719 break;
712 default: 720 default:
713 break; 721 break;
@@ -771,7 +779,7 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
771 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); 779 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
772 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); 780 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
773 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); 781 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
774 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit); 782 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
775 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 783 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
776 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 784 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
777 785
@@ -787,13 +795,13 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
787 entry_priv->desc_dma); 795 entry_priv->desc_dma);
788 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 796 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
789 797
790 entry_priv = rt2x00dev->bcn[1].entries[0].priv_data; 798 entry_priv = rt2x00dev->atim->entries[0].priv_data;
791 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 799 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
792 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 800 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
793 entry_priv->desc_dma); 801 entry_priv->desc_dma);
794 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 802 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
795 803
796 entry_priv = rt2x00dev->bcn[0].entries[0].priv_data; 804 entry_priv = rt2x00dev->bcn->entries[0].priv_data;
797 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 805 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
798 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 806 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
799 entry_priv->desc_dma); 807 entry_priv->desc_dma);
@@ -963,9 +971,9 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev)
963static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 971static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
964 enum dev_state state) 972 enum dev_state state)
965{ 973{
966 int mask = (state == STATE_RADIO_IRQ_OFF) || 974 int mask = (state == STATE_RADIO_IRQ_OFF);
967 (state == STATE_RADIO_IRQ_OFF_ISR);
968 u32 reg; 975 u32 reg;
976 unsigned long flags;
969 977
970 /* 978 /*
971 * When interrupts are being enabled, the interrupt registers 979 * When interrupts are being enabled, the interrupt registers
@@ -974,12 +982,20 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
974 if (state == STATE_RADIO_IRQ_ON) { 982 if (state == STATE_RADIO_IRQ_ON) {
975 rt2x00pci_register_read(rt2x00dev, CSR7, &reg); 983 rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
976 rt2x00pci_register_write(rt2x00dev, CSR7, reg); 984 rt2x00pci_register_write(rt2x00dev, CSR7, reg);
985
986 /*
987 * Enable tasklets.
988 */
989 tasklet_enable(&rt2x00dev->txstatus_tasklet);
990 tasklet_enable(&rt2x00dev->rxdone_tasklet);
977 } 991 }
978 992
979 /* 993 /*
980 * Only toggle the interrupts bits we are going to use. 994 * Only toggle the interrupts bits we are going to use.
981 * Non-checked interrupt bits are disabled by default. 995 * Non-checked interrupt bits are disabled by default.
982 */ 996 */
997 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
998
983 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 999 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
984 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); 1000 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
985 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); 1001 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -987,6 +1003,17 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
987 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); 1003 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
988 rt2x00_set_field32(&reg, CSR8_RXDONE, mask); 1004 rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
989 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1005 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1006
1007 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1008
1009 if (state == STATE_RADIO_IRQ_OFF) {
1010 /*
1011 * Ensure that all tasklets are finished before
1012 * disabling the interrupts.
1013 */
1014 tasklet_disable(&rt2x00dev->txstatus_tasklet);
1015 tasklet_disable(&rt2x00dev->rxdone_tasklet);
1016 }
990} 1017}
991 1018
992static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev) 1019static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1059,9 +1086,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1059 rt2400pci_disable_radio(rt2x00dev); 1086 rt2400pci_disable_radio(rt2x00dev);
1060 break; 1087 break;
1061 case STATE_RADIO_IRQ_ON: 1088 case STATE_RADIO_IRQ_ON:
1062 case STATE_RADIO_IRQ_ON_ISR:
1063 case STATE_RADIO_IRQ_OFF: 1089 case STATE_RADIO_IRQ_OFF:
1064 case STATE_RADIO_IRQ_OFF_ISR:
1065 rt2400pci_toggle_irq(rt2x00dev, state); 1090 rt2400pci_toggle_irq(rt2x00dev, state);
1066 break; 1091 break;
1067 case STATE_DEEP_SLEEP: 1092 case STATE_DEEP_SLEEP:
@@ -1106,19 +1131,21 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
1106 rt2x00_desc_write(txd, 2, word); 1131 rt2x00_desc_write(txd, 2, word);
1107 1132
1108 rt2x00_desc_read(txd, 3, &word); 1133 rt2x00_desc_read(txd, 3, &word);
1109 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal); 1134 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
1110 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5); 1135 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
1111 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1); 1136 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
1112 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service); 1137 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
1113 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6); 1138 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6);
1114 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1); 1139 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
1115 rt2x00_desc_write(txd, 3, word); 1140 rt2x00_desc_write(txd, 3, word);
1116 1141
1117 rt2x00_desc_read(txd, 4, &word); 1142 rt2x00_desc_read(txd, 4, &word);
1118 rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low); 1143 rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW,
1144 txdesc->u.plcp.length_low);
1119 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8); 1145 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
1120 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1); 1146 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1);
1121 rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high); 1147 rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH,
1148 txdesc->u.plcp.length_high);
1122 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7); 1149 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7);
1123 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1); 1150 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
1124 rt2x00_desc_write(txd, 4, word); 1151 rt2x00_desc_write(txd, 4, word);
@@ -1139,7 +1166,7 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
1139 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1166 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1140 rt2x00_set_field32(&word, TXD_W0_RTS, 1167 rt2x00_set_field32(&word, TXD_W0_RTS,
1141 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); 1168 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
1142 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1169 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
1143 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1170 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1144 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1171 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1145 rt2x00_desc_write(txd, 0, word); 1172 rt2x00_desc_write(txd, 0, word);
@@ -1183,8 +1210,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1183 /* 1210 /*
1184 * Enable beaconing again. 1211 * Enable beaconing again.
1185 */ 1212 */
1186 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1187 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1188 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 1213 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1189 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1214 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1190} 1215}
@@ -1253,7 +1278,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1253static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, 1278static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1254 const enum data_queue_qid queue_idx) 1279 const enum data_queue_qid queue_idx)
1255{ 1280{
1256 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 1281 struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
1257 struct queue_entry_priv_pci *entry_priv; 1282 struct queue_entry_priv_pci *entry_priv;
1258 struct queue_entry *entry; 1283 struct queue_entry *entry;
1259 struct txdone_entry_desc txdesc; 1284 struct txdone_entry_desc txdesc;
@@ -1289,57 +1314,68 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1289 } 1314 }
1290} 1315}
1291 1316
1292static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance) 1317static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
1318 struct rt2x00_field32 irq_field)
1293{ 1319{
1294 struct rt2x00_dev *rt2x00dev = dev_instance; 1320 u32 reg;
1295 u32 reg = rt2x00dev->irqvalue[0];
1296 1321
1297 /* 1322 /*
1298 * Handle interrupts, walk through all bits 1323 * Enable a single interrupt. The interrupt mask register
1299 * and run the tasks, the bits are checked in order of 1324 * access needs locking.
1300 * priority.
1301 */ 1325 */
1326 spin_lock_irq(&rt2x00dev->irqmask_lock);
1302 1327
1303 /* 1328 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1304 * 1 - Beacon timer expired interrupt. 1329 rt2x00_set_field32(&reg, irq_field, 0);
1305 */ 1330 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1306 if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
1307 rt2x00lib_beacondone(rt2x00dev);
1308 1331
1309 /* 1332 spin_unlock_irq(&rt2x00dev->irqmask_lock);
1310 * 2 - Rx ring done interrupt. 1333}
1311 */
1312 if (rt2x00_get_field32(reg, CSR7_RXDONE))
1313 rt2x00pci_rxdone(rt2x00dev);
1314 1334
1315 /* 1335static void rt2400pci_txstatus_tasklet(unsigned long data)
1316 * 3 - Atim ring transmit done interrupt. 1336{
1317 */ 1337 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1318 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1338 u32 reg;
1319 rt2400pci_txdone(rt2x00dev, QID_ATIM);
1320 1339
1321 /* 1340 /*
1322 * 4 - Priority ring transmit done interrupt. 1341 * Handle all tx queues.
1323 */ 1342 */
1324 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1343 rt2400pci_txdone(rt2x00dev, QID_ATIM);
1325 rt2400pci_txdone(rt2x00dev, QID_AC_VO); 1344 rt2400pci_txdone(rt2x00dev, QID_AC_VO);
1345 rt2400pci_txdone(rt2x00dev, QID_AC_VI);
1326 1346
1327 /* 1347 /*
1328 * 5 - Tx ring transmit done interrupt. 1348 * Enable all TXDONE interrupts again.
1329 */ 1349 */
1330 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1350 spin_lock_irq(&rt2x00dev->irqmask_lock);
1331 rt2400pci_txdone(rt2x00dev, QID_AC_VI);
1332 1351
1333 /* Enable interrupts again. */ 1352 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1334 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1353 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
1335 STATE_RADIO_IRQ_ON_ISR); 1354 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
1336 return IRQ_HANDLED; 1355 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
1356 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1357
1358 spin_unlock_irq(&rt2x00dev->irqmask_lock);
1359}
1360
1361static void rt2400pci_tbtt_tasklet(unsigned long data)
1362{
1363 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1364 rt2x00lib_beacondone(rt2x00dev);
1365 rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
1366}
1367
1368static void rt2400pci_rxdone_tasklet(unsigned long data)
1369{
1370 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1371 rt2x00pci_rxdone(rt2x00dev);
1372 rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
1337} 1373}
1338 1374
1339static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) 1375static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1340{ 1376{
1341 struct rt2x00_dev *rt2x00dev = dev_instance; 1377 struct rt2x00_dev *rt2x00dev = dev_instance;
1342 u32 reg; 1378 u32 reg, mask;
1343 1379
1344 /* 1380 /*
1345 * Get the interrupt sources & saved to local variable. 1381 * Get the interrupt sources & saved to local variable.
@@ -1354,14 +1390,44 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1354 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 1390 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1355 return IRQ_HANDLED; 1391 return IRQ_HANDLED;
1356 1392
1357 /* Store irqvalues for use in the interrupt thread. */ 1393 mask = reg;
1358 rt2x00dev->irqvalue[0] = reg;
1359 1394
1360 /* Disable interrupts, will be enabled again in the interrupt thread. */ 1395 /*
1361 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1396 * Schedule tasklets for interrupt handling.
1362 STATE_RADIO_IRQ_OFF_ISR); 1397 */
1398 if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
1399 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
1363 1400
1364 return IRQ_WAKE_THREAD; 1401 if (rt2x00_get_field32(reg, CSR7_RXDONE))
1402 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1403
1404 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
1405 rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
1406 rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
1407 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
1408 /*
1409 * Mask out all txdone interrupts.
1410 */
1411 rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
1412 rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
1413 rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
1414 }
1415
1416 /*
1417 * Disable all interrupts for which a tasklet was scheduled right now,
1418 * the tasklet will reenable the appropriate interrupts.
1419 */
1420 spin_lock(&rt2x00dev->irqmask_lock);
1421
1422 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1423 reg |= mask;
1424 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1425
1426 spin_unlock(&rt2x00dev->irqmask_lock);
1427
1428
1429
1430 return IRQ_HANDLED;
1365} 1431}
1366 1432
1367/* 1433/*
@@ -1574,6 +1640,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1574 */ 1640 */
1575 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1641 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
1576 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1642 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
1643 __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
1577 1644
1578 /* 1645 /*
1579 * Set the rssi offset. 1646 * Set the rssi offset.
@@ -1655,7 +1722,9 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1655 1722
1656static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { 1723static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1657 .irq_handler = rt2400pci_interrupt, 1724 .irq_handler = rt2400pci_interrupt,
1658 .irq_handler_thread = rt2400pci_interrupt_thread, 1725 .txstatus_tasklet = rt2400pci_txstatus_tasklet,
1726 .tbtt_tasklet = rt2400pci_tbtt_tasklet,
1727 .rxdone_tasklet = rt2400pci_rxdone_tasklet,
1659 .probe_hw = rt2400pci_probe_hw, 1728 .probe_hw = rt2400pci_probe_hw,
1660 .initialize = rt2x00pci_initialize, 1729 .initialize = rt2x00pci_initialize,
1661 .uninitialize = rt2x00pci_uninitialize, 1730 .uninitialize = rt2x00pci_uninitialize,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index a9ff26a27724..58277878889e 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -293,7 +293,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
293 struct rt2x00intf_conf *conf, 293 struct rt2x00intf_conf *conf,
294 const unsigned int flags) 294 const unsigned int flags)
295{ 295{
296 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); 296 struct data_queue *queue = rt2x00dev->bcn;
297 unsigned int bcn_preload; 297 unsigned int bcn_preload;
298 u32 reg; 298 u32 reg;
299 299
@@ -311,9 +311,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
311 * Enable synchronisation. 311 * Enable synchronisation.
312 */ 312 */
313 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 313 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
314 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
315 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync); 314 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
316 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
317 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 315 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
318 } 316 }
319 317
@@ -737,6 +735,11 @@ static void rt2500pci_start_queue(struct data_queue *queue)
737 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 735 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
738 break; 736 break;
739 case QID_BEACON: 737 case QID_BEACON:
738 /*
739 * Allow the tbtt tasklet to be scheduled.
740 */
741 tasklet_enable(&rt2x00dev->tbtt_tasklet);
742
740 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 743 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
741 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 744 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
742 rt2x00_set_field32(&reg, CSR14_TBCN, 1); 745 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -798,6 +801,11 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
798 rt2x00_set_field32(&reg, CSR14_TBCN, 0); 801 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
799 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 802 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
800 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 803 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
804
805 /*
806 * Wait for possibly running tbtt tasklets.
807 */
808 tasklet_disable(&rt2x00dev->tbtt_tasklet);
801 break; 809 break;
802 default: 810 default:
803 break; 811 break;
@@ -857,7 +865,7 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
857 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); 865 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
858 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); 866 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
859 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); 867 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
860 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit); 868 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
861 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 869 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
862 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 870 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
863 871
@@ -873,13 +881,13 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
873 entry_priv->desc_dma); 881 entry_priv->desc_dma);
874 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 882 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
875 883
876 entry_priv = rt2x00dev->bcn[1].entries[0].priv_data; 884 entry_priv = rt2x00dev->atim->entries[0].priv_data;
877 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 885 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
878 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 886 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
879 entry_priv->desc_dma); 887 entry_priv->desc_dma);
880 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 888 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
881 889
882 entry_priv = rt2x00dev->bcn[0].entries[0].priv_data; 890 entry_priv = rt2x00dev->bcn->entries[0].priv_data;
883 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 891 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
884 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 892 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
885 entry_priv->desc_dma); 893 entry_priv->desc_dma);
@@ -1118,9 +1126,9 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev)
1118static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 1126static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1119 enum dev_state state) 1127 enum dev_state state)
1120{ 1128{
1121 int mask = (state == STATE_RADIO_IRQ_OFF) || 1129 int mask = (state == STATE_RADIO_IRQ_OFF);
1122 (state == STATE_RADIO_IRQ_OFF_ISR);
1123 u32 reg; 1130 u32 reg;
1131 unsigned long flags;
1124 1132
1125 /* 1133 /*
1126 * When interrupts are being enabled, the interrupt registers 1134 * When interrupts are being enabled, the interrupt registers
@@ -1129,12 +1137,20 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1129 if (state == STATE_RADIO_IRQ_ON) { 1137 if (state == STATE_RADIO_IRQ_ON) {
1130 rt2x00pci_register_read(rt2x00dev, CSR7, &reg); 1138 rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
1131 rt2x00pci_register_write(rt2x00dev, CSR7, reg); 1139 rt2x00pci_register_write(rt2x00dev, CSR7, reg);
1140
1141 /*
1142 * Enable tasklets.
1143 */
1144 tasklet_enable(&rt2x00dev->txstatus_tasklet);
1145 tasklet_enable(&rt2x00dev->rxdone_tasklet);
1132 } 1146 }
1133 1147
1134 /* 1148 /*
1135 * Only toggle the interrupts bits we are going to use. 1149 * Only toggle the interrupts bits we are going to use.
1136 * Non-checked interrupt bits are disabled by default. 1150 * Non-checked interrupt bits are disabled by default.
1137 */ 1151 */
1152 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1153
1138 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1154 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1139 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); 1155 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
1140 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); 1156 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -1142,6 +1158,16 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1142 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); 1158 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
1143 rt2x00_set_field32(&reg, CSR8_RXDONE, mask); 1159 rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
1144 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1160 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1161
1162 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1163
1164 if (state == STATE_RADIO_IRQ_OFF) {
1165 /*
1166 * Ensure that all tasklets are finished.
1167 */
1168 tasklet_disable(&rt2x00dev->txstatus_tasklet);
1169 tasklet_disable(&rt2x00dev->rxdone_tasklet);
1170 }
1145} 1171}
1146 1172
1147static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev) 1173static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1214,9 +1240,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1214 rt2500pci_disable_radio(rt2x00dev); 1240 rt2500pci_disable_radio(rt2x00dev);
1215 break; 1241 break;
1216 case STATE_RADIO_IRQ_ON: 1242 case STATE_RADIO_IRQ_ON:
1217 case STATE_RADIO_IRQ_ON_ISR:
1218 case STATE_RADIO_IRQ_OFF: 1243 case STATE_RADIO_IRQ_OFF:
1219 case STATE_RADIO_IRQ_OFF_ISR:
1220 rt2500pci_toggle_irq(rt2x00dev, state); 1244 rt2500pci_toggle_irq(rt2x00dev, state);
1221 break; 1245 break;
1222 case STATE_DEEP_SLEEP: 1246 case STATE_DEEP_SLEEP:
@@ -1263,10 +1287,12 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
1263 rt2x00_desc_write(txd, 2, word); 1287 rt2x00_desc_write(txd, 2, word);
1264 1288
1265 rt2x00_desc_read(txd, 3, &word); 1289 rt2x00_desc_read(txd, 3, &word);
1266 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal); 1290 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
1267 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service); 1291 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
1268 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low); 1292 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW,
1269 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high); 1293 txdesc->u.plcp.length_low);
1294 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH,
1295 txdesc->u.plcp.length_high);
1270 rt2x00_desc_write(txd, 3, word); 1296 rt2x00_desc_write(txd, 3, word);
1271 1297
1272 rt2x00_desc_read(txd, 10, &word); 1298 rt2x00_desc_read(txd, 10, &word);
@@ -1291,7 +1317,7 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
1291 rt2x00_set_field32(&word, TXD_W0_OFDM, 1317 rt2x00_set_field32(&word, TXD_W0_OFDM,
1292 (txdesc->rate_mode == RATE_MODE_OFDM)); 1318 (txdesc->rate_mode == RATE_MODE_OFDM));
1293 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); 1319 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
1294 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1320 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
1295 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1321 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1296 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1322 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1297 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); 1323 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
@@ -1337,8 +1363,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1337 /* 1363 /*
1338 * Enable beaconing again. 1364 * Enable beaconing again.
1339 */ 1365 */
1340 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1341 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1342 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 1366 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1343 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1367 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1344} 1368}
@@ -1386,7 +1410,7 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1386static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, 1410static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1387 const enum data_queue_qid queue_idx) 1411 const enum data_queue_qid queue_idx)
1388{ 1412{
1389 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 1413 struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
1390 struct queue_entry_priv_pci *entry_priv; 1414 struct queue_entry_priv_pci *entry_priv;
1391 struct queue_entry *entry; 1415 struct queue_entry *entry;
1392 struct txdone_entry_desc txdesc; 1416 struct txdone_entry_desc txdesc;
@@ -1422,58 +1446,68 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1422 } 1446 }
1423} 1447}
1424 1448
1425static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance) 1449static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
1450 struct rt2x00_field32 irq_field)
1426{ 1451{
1427 struct rt2x00_dev *rt2x00dev = dev_instance; 1452 u32 reg;
1428 u32 reg = rt2x00dev->irqvalue[0];
1429 1453
1430 /* 1454 /*
1431 * Handle interrupts, walk through all bits 1455 * Enable a single interrupt. The interrupt mask register
1432 * and run the tasks, the bits are checked in order of 1456 * access needs locking.
1433 * priority.
1434 */ 1457 */
1458 spin_lock_irq(&rt2x00dev->irqmask_lock);
1435 1459
1436 /* 1460 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1437 * 1 - Beacon timer expired interrupt. 1461 rt2x00_set_field32(&reg, irq_field, 0);
1438 */ 1462 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1439 if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
1440 rt2x00lib_beacondone(rt2x00dev);
1441 1463
1442 /* 1464 spin_unlock_irq(&rt2x00dev->irqmask_lock);
1443 * 2 - Rx ring done interrupt. 1465}
1444 */
1445 if (rt2x00_get_field32(reg, CSR7_RXDONE))
1446 rt2x00pci_rxdone(rt2x00dev);
1447 1466
1448 /* 1467static void rt2500pci_txstatus_tasklet(unsigned long data)
1449 * 3 - Atim ring transmit done interrupt. 1468{
1450 */ 1469 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1451 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1470 u32 reg;
1452 rt2500pci_txdone(rt2x00dev, QID_ATIM);
1453 1471
1454 /* 1472 /*
1455 * 4 - Priority ring transmit done interrupt. 1473 * Handle all tx queues.
1456 */ 1474 */
1457 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1475 rt2500pci_txdone(rt2x00dev, QID_ATIM);
1458 rt2500pci_txdone(rt2x00dev, QID_AC_VO); 1476 rt2500pci_txdone(rt2x00dev, QID_AC_VO);
1477 rt2500pci_txdone(rt2x00dev, QID_AC_VI);
1459 1478
1460 /* 1479 /*
1461 * 5 - Tx ring transmit done interrupt. 1480 * Enable all TXDONE interrupts again.
1462 */ 1481 */
1463 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1482 spin_lock_irq(&rt2x00dev->irqmask_lock);
1464 rt2500pci_txdone(rt2x00dev, QID_AC_VI); 1483
1484 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1485 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
1486 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
1487 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
1488 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1465 1489
1466 /* Enable interrupts again. */ 1490 spin_unlock_irq(&rt2x00dev->irqmask_lock);
1467 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1491}
1468 STATE_RADIO_IRQ_ON_ISR);
1469 1492
1470 return IRQ_HANDLED; 1493static void rt2500pci_tbtt_tasklet(unsigned long data)
1494{
1495 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1496 rt2x00lib_beacondone(rt2x00dev);
1497 rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
1498}
1499
1500static void rt2500pci_rxdone_tasklet(unsigned long data)
1501{
1502 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1503 rt2x00pci_rxdone(rt2x00dev);
1504 rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
1471} 1505}
1472 1506
1473static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) 1507static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1474{ 1508{
1475 struct rt2x00_dev *rt2x00dev = dev_instance; 1509 struct rt2x00_dev *rt2x00dev = dev_instance;
1476 u32 reg; 1510 u32 reg, mask;
1477 1511
1478 /* 1512 /*
1479 * Get the interrupt sources & saved to local variable. 1513 * Get the interrupt sources & saved to local variable.
@@ -1488,14 +1522,42 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1488 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 1522 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1489 return IRQ_HANDLED; 1523 return IRQ_HANDLED;
1490 1524
1491 /* Store irqvalues for use in the interrupt thread. */ 1525 mask = reg;
1492 rt2x00dev->irqvalue[0] = reg;
1493 1526
1494 /* Disable interrupts, will be enabled again in the interrupt thread. */ 1527 /*
1495 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1528 * Schedule tasklets for interrupt handling.
1496 STATE_RADIO_IRQ_OFF_ISR); 1529 */
1530 if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
1531 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
1497 1532
1498 return IRQ_WAKE_THREAD; 1533 if (rt2x00_get_field32(reg, CSR7_RXDONE))
1534 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1535
1536 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
1537 rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
1538 rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
1539 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
1540 /*
1541 * Mask out all txdone interrupts.
1542 */
1543 rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
1544 rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
1545 rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
1546 }
1547
1548 /*
1549 * Disable all interrupts for which a tasklet was scheduled right now,
1550 * the tasklet will reenable the appropriate interrupts.
1551 */
1552 spin_lock(&rt2x00dev->irqmask_lock);
1553
1554 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1555 reg |= mask;
1556 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1557
1558 spin_unlock(&rt2x00dev->irqmask_lock);
1559
1560 return IRQ_HANDLED;
1499} 1561}
1500 1562
1501/* 1563/*
@@ -1896,6 +1958,7 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1896 */ 1958 */
1897 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1959 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
1898 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1960 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
1961 __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
1899 1962
1900 /* 1963 /*
1901 * Set the rssi offset. 1964 * Set the rssi offset.
@@ -1952,7 +2015,9 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1952 2015
1953static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { 2016static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1954 .irq_handler = rt2500pci_interrupt, 2017 .irq_handler = rt2500pci_interrupt,
1955 .irq_handler_thread = rt2500pci_interrupt_thread, 2018 .txstatus_tasklet = rt2500pci_txstatus_tasklet,
2019 .tbtt_tasklet = rt2500pci_tbtt_tasklet,
2020 .rxdone_tasklet = rt2500pci_rxdone_tasklet,
1956 .probe_hw = rt2500pci_probe_hw, 2021 .probe_hw = rt2500pci_probe_hw,
1957 .initialize = rt2x00pci_initialize, 2022 .initialize = rt2x00pci_initialize,
1958 .uninitialize = rt2x00pci_uninitialize, 2023 .uninitialize = rt2x00pci_uninitialize,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 6b3b1de46792..979fe6596a2d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -478,9 +478,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
478 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); 478 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
479 479
480 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); 480 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
481 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
482 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync); 481 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
483 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
484 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); 482 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
485 } 483 }
486 484
@@ -1056,9 +1054,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1056 rt2500usb_disable_radio(rt2x00dev); 1054 rt2500usb_disable_radio(rt2x00dev);
1057 break; 1055 break;
1058 case STATE_RADIO_IRQ_ON: 1056 case STATE_RADIO_IRQ_ON:
1059 case STATE_RADIO_IRQ_ON_ISR:
1060 case STATE_RADIO_IRQ_OFF: 1057 case STATE_RADIO_IRQ_OFF:
1061 case STATE_RADIO_IRQ_OFF_ISR:
1062 /* No support, but no error either */ 1058 /* No support, but no error either */
1063 break; 1059 break;
1064 case STATE_DEEP_SLEEP: 1060 case STATE_DEEP_SLEEP:
@@ -1104,7 +1100,7 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
1104 (txdesc->rate_mode == RATE_MODE_OFDM)); 1100 (txdesc->rate_mode == RATE_MODE_OFDM));
1105 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1101 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1106 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); 1102 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1107 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1103 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
1108 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); 1104 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1109 rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher); 1105 rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
1110 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); 1106 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
@@ -1118,10 +1114,12 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
1118 rt2x00_desc_write(txd, 1, word); 1114 rt2x00_desc_write(txd, 1, word);
1119 1115
1120 rt2x00_desc_read(txd, 2, &word); 1116 rt2x00_desc_read(txd, 2, &word);
1121 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); 1117 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
1122 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); 1118 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
1123 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); 1119 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
1124 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1120 txdesc->u.plcp.length_low);
1121 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
1122 txdesc->u.plcp.length_high);
1125 rt2x00_desc_write(txd, 2, word); 1123 rt2x00_desc_write(txd, 2, word);
1126 1124
1127 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { 1125 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@@ -1799,6 +1797,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1799 __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags); 1797 __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags);
1800 } 1798 }
1801 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); 1799 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
1800 __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
1802 1801
1803 /* 1802 /*
1804 * Set the rssi offset. 1803 * Set the rssi offset.
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4c55e8525cad..70b9abbdeb9e 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) 51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) 52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) 53 * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
54 * RF5390 2.4G 1T1R
54 */ 55 */
55#define RF2820 0x0001 56#define RF2820 0x0001
56#define RF2850 0x0002 57#define RF2850 0x0002
@@ -65,6 +66,7 @@
65#define RF3320 0x000b 66#define RF3320 0x000b
66#define RF3322 0x000c 67#define RF3322 0x000c
67#define RF3853 0x000d 68#define RF3853 0x000d
69#define RF5390 0x5390
68 70
69/* 71/*
70 * Chipset revisions. 72 * Chipset revisions.
@@ -77,6 +79,7 @@
77#define REV_RT3071E 0x0211 79#define REV_RT3071E 0x0211
78#define REV_RT3090E 0x0211 80#define REV_RT3090E 0x0211
79#define REV_RT3390E 0x0211 81#define REV_RT3390E 0x0211
82#define REV_RT5390F 0x0502
80 83
81/* 84/*
82 * Signal information. 85 * Signal information.
@@ -121,6 +124,13 @@
121#define E2PROM_CSR_RELOAD FIELD32(0x00000080) 124#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
122 125
123/* 126/*
127 * AUX_CTRL: Aux/PCI-E related configuration
128 */
129#define AUX_CTRL 0x10c
130#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002)
131#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400)
132
133/*
124 * OPT_14: Unknown register used by rt3xxx devices. 134 * OPT_14: Unknown register used by rt3xxx devices.
125 */ 135 */
126#define OPT_14_CSR 0x0114 136#define OPT_14_CSR 0x0114
@@ -270,6 +280,7 @@
270 280
271/* 281/*
272 * GPIO_CTRL_CFG: 282 * GPIO_CTRL_CFG:
283 * GPIOD: GPIO direction, 0: Output, 1: Input
273 */ 284 */
274#define GPIO_CTRL_CFG 0x0228 285#define GPIO_CTRL_CFG 0x0228
275#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001) 286#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
@@ -280,7 +291,14 @@
280#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020) 291#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
281#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040) 292#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
282#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080) 293#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
283#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100) 294#define GPIO_CTRL_CFG_GPIOD_BIT0 FIELD32(0x00000100)
295#define GPIO_CTRL_CFG_GPIOD_BIT1 FIELD32(0x00000200)
296#define GPIO_CTRL_CFG_GPIOD_BIT2 FIELD32(0x00000400)
297#define GPIO_CTRL_CFG_GPIOD_BIT3 FIELD32(0x00000800)
298#define GPIO_CTRL_CFG_GPIOD_BIT4 FIELD32(0x00001000)
299#define GPIO_CTRL_CFG_GPIOD_BIT5 FIELD32(0x00002000)
300#define GPIO_CTRL_CFG_GPIOD_BIT6 FIELD32(0x00004000)
301#define GPIO_CTRL_CFG_GPIOD_BIT7 FIELD32(0x00008000)
284 302
285/* 303/*
286 * MCU_CMD_CFG 304 * MCU_CMD_CFG
@@ -372,8 +390,12 @@
372 390
373/* 391/*
374 * US_CYC_CNT 392 * US_CYC_CNT
393 * BT_MODE_EN: Bluetooth mode enable
394 * CLOCK CYCLE: Clock cycle count in 1us.
395 * PCI:0x21, PCIE:0x7d, USB:0x1e
375 */ 396 */
376#define US_CYC_CNT 0x02a4 397#define US_CYC_CNT 0x02a4
398#define US_CYC_CNT_BT_MODE_EN FIELD32(0x00000100)
377#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff) 399#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff)
378 400
379/* 401/*
@@ -442,7 +464,7 @@
442 */ 464 */
443#define RF_CSR_CFG 0x0500 465#define RF_CSR_CFG 0x0500
444#define RF_CSR_CFG_DATA FIELD32(0x000000ff) 466#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
445#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00) 467#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00)
446#define RF_CSR_CFG_WRITE FIELD32(0x00010000) 468#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
447#define RF_CSR_CFG_BUSY FIELD32(0x00020000) 469#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
448 470
@@ -1132,8 +1154,8 @@
1132 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd) 1154 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
1133 * PROTECT_CTRL: Protection control frame type for CCK TX 1155 * PROTECT_CTRL: Protection control frame type for CCK TX
1134 * 0:none, 1:RTS/CTS, 2:CTS-to-self 1156 * 0:none, 1:RTS/CTS, 2:CTS-to-self
1135 * PROTECT_NAV: TXOP protection type for CCK TX 1157 * PROTECT_NAV_SHORT: TXOP protection type for CCK TX with short NAV
1136 * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect 1158 * PROTECT_NAV_LONG: TXOP protection type for CCK TX with long NAV
1137 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow 1159 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
1138 * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow 1160 * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
1139 * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow 1161 * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
@@ -1145,7 +1167,8 @@
1145#define CCK_PROT_CFG 0x1364 1167#define CCK_PROT_CFG 0x1364
1146#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1168#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1147#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1169#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1148#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1170#define CCK_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1171#define CCK_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1149#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1172#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1150#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1173#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1151#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1174#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1160,7 +1183,8 @@
1160#define OFDM_PROT_CFG 0x1368 1183#define OFDM_PROT_CFG 0x1368
1161#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1184#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1162#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1185#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1163#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1186#define OFDM_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1187#define OFDM_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1164#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1188#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1165#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1189#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1166#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1190#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1175,7 +1199,8 @@
1175#define MM20_PROT_CFG 0x136c 1199#define MM20_PROT_CFG 0x136c
1176#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1200#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1177#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1201#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1178#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1202#define MM20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1203#define MM20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1179#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1204#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1180#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1205#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1181#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1206#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1190,7 +1215,8 @@
1190#define MM40_PROT_CFG 0x1370 1215#define MM40_PROT_CFG 0x1370
1191#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1216#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1192#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1217#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1193#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1218#define MM40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1219#define MM40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1194#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1220#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1195#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1221#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1196#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1222#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1205,7 +1231,8 @@
1205#define GF20_PROT_CFG 0x1374 1231#define GF20_PROT_CFG 0x1374
1206#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1232#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1207#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1233#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1208#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1234#define GF20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1235#define GF20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1209#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1236#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1210#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1237#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1211#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1238#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1220,7 +1247,8 @@
1220#define GF40_PROT_CFG 0x1378 1247#define GF40_PROT_CFG 0x1378
1221#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1248#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1222#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1249#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1223#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1250#define GF40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1251#define GF40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1224#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1252#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1225#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1253#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1226#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1254#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1697,11 +1725,14 @@ struct mac_iveiv_entry {
1697 */ 1725 */
1698 1726
1699/* 1727/*
1700 * BBP 1: TX Antenna & Power 1728 * BBP 1: TX Antenna & Power Control
1701 * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm, 1729 * POWER_CTRL:
1702 * 3 - increase tx power by 6dBm 1730 * 0 - normal,
1731 * 1 - drop tx power by 6dBm,
1732 * 2 - drop tx power by 12dBm,
1733 * 3 - increase tx power by 6dBm
1703 */ 1734 */
1704#define BBP1_TX_POWER FIELD8(0x07) 1735#define BBP1_TX_POWER_CTRL FIELD8(0x07)
1705#define BBP1_TX_ANTENNA FIELD8(0x18) 1736#define BBP1_TX_ANTENNA FIELD8(0x18)
1706 1737
1707/* 1738/*
@@ -1715,6 +1746,13 @@ struct mac_iveiv_entry {
1715 */ 1746 */
1716#define BBP4_TX_BF FIELD8(0x01) 1747#define BBP4_TX_BF FIELD8(0x01)
1717#define BBP4_BANDWIDTH FIELD8(0x18) 1748#define BBP4_BANDWIDTH FIELD8(0x18)
1749#define BBP4_MAC_IF_CTRL FIELD8(0x40)
1750
1751/*
1752 * BBP 109
1753 */
1754#define BBP109_TX0_POWER FIELD8(0x0f)
1755#define BBP109_TX1_POWER FIELD8(0xf0)
1718 1756
1719/* 1757/*
1720 * BBP 138: Unknown 1758 * BBP 138: Unknown
@@ -1725,6 +1763,11 @@ struct mac_iveiv_entry {
1725#define BBP138_TX_DAC2 FIELD8(0x40) 1763#define BBP138_TX_DAC2 FIELD8(0x40)
1726 1764
1727/* 1765/*
1766 * BBP 152: Rx Ant
1767 */
1768#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
1769
1770/*
1728 * RFCSR registers 1771 * RFCSR registers
1729 * The wordsize of the RFCSR is 8 bits. 1772 * The wordsize of the RFCSR is 8 bits.
1730 */ 1773 */
@@ -1733,12 +1776,18 @@ struct mac_iveiv_entry {
1733 * RFCSR 1: 1776 * RFCSR 1:
1734 */ 1777 */
1735#define RFCSR1_RF_BLOCK_EN FIELD8(0x01) 1778#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
1779#define RFCSR1_PLL_PD FIELD8(0x02)
1736#define RFCSR1_RX0_PD FIELD8(0x04) 1780#define RFCSR1_RX0_PD FIELD8(0x04)
1737#define RFCSR1_TX0_PD FIELD8(0x08) 1781#define RFCSR1_TX0_PD FIELD8(0x08)
1738#define RFCSR1_RX1_PD FIELD8(0x10) 1782#define RFCSR1_RX1_PD FIELD8(0x10)
1739#define RFCSR1_TX1_PD FIELD8(0x20) 1783#define RFCSR1_TX1_PD FIELD8(0x20)
1740 1784
1741/* 1785/*
1786 * RFCSR 2:
1787 */
1788#define RFCSR2_RESCAL_EN FIELD8(0x80)
1789
1790/*
1742 * RFCSR 6: 1791 * RFCSR 6:
1743 */ 1792 */
1744#define RFCSR6_R1 FIELD8(0x03) 1793#define RFCSR6_R1 FIELD8(0x03)
@@ -1750,6 +1799,11 @@ struct mac_iveiv_entry {
1750#define RFCSR7_RF_TUNING FIELD8(0x01) 1799#define RFCSR7_RF_TUNING FIELD8(0x01)
1751 1800
1752/* 1801/*
1802 * RFCSR 11:
1803 */
1804#define RFCSR11_R FIELD8(0x03)
1805
1806/*
1753 * RFCSR 12: 1807 * RFCSR 12:
1754 */ 1808 */
1755#define RFCSR12_TX_POWER FIELD8(0x1f) 1809#define RFCSR12_TX_POWER FIELD8(0x1f)
@@ -1770,6 +1824,7 @@ struct mac_iveiv_entry {
1770#define RFCSR17_TXMIXER_GAIN FIELD8(0x07) 1824#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
1771#define RFCSR17_TX_LO1_EN FIELD8(0x08) 1825#define RFCSR17_TX_LO1_EN FIELD8(0x08)
1772#define RFCSR17_R FIELD8(0x20) 1826#define RFCSR17_R FIELD8(0x20)
1827#define RFCSR17_CODE FIELD8(0x7f)
1773 1828
1774/* 1829/*
1775 * RFCSR 20: 1830 * RFCSR 20:
@@ -1802,9 +1857,33 @@ struct mac_iveiv_entry {
1802/* 1857/*
1803 * RFCSR 30: 1858 * RFCSR 30:
1804 */ 1859 */
1860#define RFCSR30_TX_H20M FIELD8(0x02)
1861#define RFCSR30_RX_H20M FIELD8(0x04)
1862#define RFCSR30_RX_VCM FIELD8(0x18)
1805#define RFCSR30_RF_CALIBRATION FIELD8(0x80) 1863#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
1806 1864
1807/* 1865/*
1866 * RFCSR 31:
1867 */
1868#define RFCSR31_RX_AGC_FC FIELD8(0x1f)
1869#define RFCSR31_RX_H20M FIELD8(0x20)
1870
1871/*
1872 * RFCSR 38:
1873 */
1874#define RFCSR38_RX_LO1_EN FIELD8(0x20)
1875
1876/*
1877 * RFCSR 39:
1878 */
1879#define RFCSR39_RX_LO2_EN FIELD8(0x80)
1880
1881/*
1882 * RFCSR 49:
1883 */
1884#define RFCSR49_TX FIELD8(0x3f)
1885
1886/*
1808 * RF registers 1887 * RF registers
1809 */ 1888 */
1810 1889
@@ -1837,6 +1916,11 @@ struct mac_iveiv_entry {
1837 */ 1916 */
1838 1917
1839/* 1918/*
1919 * Chip ID
1920 */
1921#define EEPROM_CHIP_ID 0x0000
1922
1923/*
1840 * EEPROM Version 1924 * EEPROM Version
1841 */ 1925 */
1842#define EEPROM_VERSION 0x0001 1926#define EEPROM_VERSION 0x0001
@@ -1989,23 +2073,26 @@ struct mac_iveiv_entry {
1989#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) 2073#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
1990 2074
1991/* 2075/*
1992 * EEPROM Maximum TX power values 2076 * EEPROM EIRP Maximum TX power values(unit: dbm)
1993 */ 2077 */
1994#define EEPROM_MAX_TX_POWER 0x0027 2078#define EEPROM_EIRP_MAX_TX_POWER 0x0027
1995#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff) 2079#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff)
1996#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00) 2080#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00)
1997 2081
1998/* 2082/*
1999 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power. 2083 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
2000 * This is delta in 40MHZ. 2084 * This is delta in 40MHZ.
2001 * VALUE: Tx Power dalta value (MAX=4) 2085 * VALUE: Tx Power dalta value, MAX=4(unit: dbm)
2002 * TYPE: 1: Plus the delta value, 0: minus the delta value 2086 * TYPE: 1: Plus the delta value, 0: minus the delta value
2003 * TXPOWER: Enable: 2087 * ENABLE: enable tx power compensation for 40BW
2004 */ 2088 */
2005#define EEPROM_TXPOWER_DELTA 0x0028 2089#define EEPROM_TXPOWER_DELTA 0x0028
2006#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f) 2090#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f)
2007#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040) 2091#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040)
2008#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080) 2092#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
2093#define EEPROM_TXPOWER_DELTA_VALUE_5G FIELD16(0x3f00)
2094#define EEPROM_TXPOWER_DELTA_TYPE_5G FIELD16(0x4000)
2095#define EEPROM_TXPOWER_DELTA_ENABLE_5G FIELD16(0x8000)
2009 2096
2010/* 2097/*
2011 * EEPROM TXPOWER 802.11BG 2098 * EEPROM TXPOWER 802.11BG
@@ -2058,6 +2145,7 @@ struct mac_iveiv_entry {
2058#define MCU_LED_LED_POLARITY 0x54 2145#define MCU_LED_LED_POLARITY 0x54
2059#define MCU_RADAR 0x60 2146#define MCU_RADAR 0x60
2060#define MCU_BOOT_SIGNAL 0x72 2147#define MCU_BOOT_SIGNAL 0x72
2148#define MCU_ANT_SELECT 0X73
2061#define MCU_BBP_SIGNAL 0x80 2149#define MCU_BBP_SIGNAL 0x80
2062#define MCU_POWER_SAVE 0x83 2150#define MCU_POWER_SAVE 0x83
2063 2151
@@ -2202,4 +2290,9 @@ struct mac_iveiv_entry {
2202#define TXPOWER_A_TO_DEV(__txpower) \ 2290#define TXPOWER_A_TO_DEV(__txpower) \
2203 clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER) 2291 clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
2204 2292
2293/*
2294 * Board's maximun TX power limitation
2295 */
2296#define EIRP_MAX_TX_POWER_LIMIT 0x50
2297
2205#endif /* RT2800_H */ 2298#endif /* RT2800_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 54917a281398..2ee6cebb9b25 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -400,8 +400,15 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
400 if (rt2800_wait_csr_ready(rt2x00dev)) 400 if (rt2800_wait_csr_ready(rt2x00dev))
401 return -EBUSY; 401 return -EBUSY;
402 402
403 if (rt2x00_is_pci(rt2x00dev)) 403 if (rt2x00_is_pci(rt2x00dev)) {
404 if (rt2x00_rt(rt2x00dev, RT5390)) {
405 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
406 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
407 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
408 rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
409 }
404 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); 410 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
411 }
405 412
406 /* 413 /*
407 * Disable DMA, will be reenabled later when enabling 414 * Disable DMA, will be reenabled later when enabling
@@ -465,14 +472,15 @@ void rt2800_write_tx_data(struct queue_entry *entry,
465 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 472 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
466 rt2x00_set_field32(&word, TXWI_W0_AMPDU, 473 rt2x00_set_field32(&word, TXWI_W0_AMPDU,
467 test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags)); 474 test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
468 rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density); 475 rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY,
469 rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop); 476 txdesc->u.ht.mpdu_density);
470 rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs); 477 rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->u.ht.txop);
478 rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->u.ht.mcs);
471 rt2x00_set_field32(&word, TXWI_W0_BW, 479 rt2x00_set_field32(&word, TXWI_W0_BW,
472 test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags)); 480 test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
473 rt2x00_set_field32(&word, TXWI_W0_SHORT_GI, 481 rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
474 test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags)); 482 test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
475 rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc); 483 rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->u.ht.stbc);
476 rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode); 484 rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
477 rt2x00_desc_write(txwi, 0, word); 485 rt2x00_desc_write(txwi, 0, word);
478 486
@@ -481,7 +489,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
481 test_bit(ENTRY_TXD_ACK, &txdesc->flags)); 489 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
482 rt2x00_set_field32(&word, TXWI_W1_NSEQ, 490 rt2x00_set_field32(&word, TXWI_W1_NSEQ,
483 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 491 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
484 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size); 492 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size);
485 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, 493 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
486 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 494 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
487 txdesc->key_idx : 0xff); 495 txdesc->key_idx : 0xff);
@@ -674,7 +682,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
674 * confuse the rate control algortihm by providing clearly wrong 682 * confuse the rate control algortihm by providing clearly wrong
675 * data. 683 * data.
676 */ 684 */
677 if (aggr == 1 && ampdu == 0 && real_mcs != mcs) { 685 if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) {
678 skbdesc->tx_rate_idx = real_mcs; 686 skbdesc->tx_rate_idx = real_mcs;
679 mcs = real_mcs; 687 mcs = real_mcs;
680 } 688 }
@@ -744,7 +752,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
744 if (pid >= QID_RX) 752 if (pid >= QID_RX)
745 continue; 753 continue;
746 754
747 queue = rt2x00queue_get_queue(rt2x00dev, pid); 755 queue = rt2x00queue_get_tx_queue(rt2x00dev, pid);
748 if (unlikely(!queue)) 756 if (unlikely(!queue))
749 continue; 757 continue;
750 758
@@ -773,13 +781,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
773 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 781 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
774 unsigned int beacon_base; 782 unsigned int beacon_base;
775 unsigned int padding_len; 783 unsigned int padding_len;
776 u32 reg; 784 u32 orig_reg, reg;
777 785
778 /* 786 /*
779 * Disable beaconing while we are reloading the beacon data, 787 * Disable beaconing while we are reloading the beacon data,
780 * otherwise we might be sending out invalid data. 788 * otherwise we might be sending out invalid data.
781 */ 789 */
782 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 790 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
791 orig_reg = reg;
783 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 792 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
784 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 793 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
785 794
@@ -810,7 +819,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
810 * Write entire beacon with TXWI and padding to register. 819 * Write entire beacon with TXWI and padding to register.
811 */ 820 */
812 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 821 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
813 skb_pad(entry->skb, padding_len); 822 if (padding_len && skb_pad(entry->skb, padding_len)) {
823 ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
824 /* skb freed by skb_pad() on failure */
825 entry->skb = NULL;
826 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
827 return;
828 }
829
814 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 830 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
815 rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, 831 rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
816 entry->skb->len + padding_len); 832 entry->skb->len + padding_len);
@@ -818,8 +834,6 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
818 /* 834 /*
819 * Enable beaconing again. 835 * Enable beaconing again.
820 */ 836 */
821 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
822 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
823 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 837 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
824 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 838 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
825 839
@@ -831,8 +845,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
831} 845}
832EXPORT_SYMBOL_GPL(rt2800_write_beacon); 846EXPORT_SYMBOL_GPL(rt2800_write_beacon);
833 847
834static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev, 848static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
835 unsigned int beacon_base) 849 unsigned int beacon_base)
836{ 850{
837 int i; 851 int i;
838 852
@@ -845,6 +859,33 @@ static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
845 rt2800_register_write(rt2x00dev, beacon_base + i, 0); 859 rt2800_register_write(rt2x00dev, beacon_base + i, 0);
846} 860}
847 861
862void rt2800_clear_beacon(struct queue_entry *entry)
863{
864 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
865 u32 reg;
866
867 /*
868 * Disable beaconing while we are reloading the beacon data,
869 * otherwise we might be sending out invalid data.
870 */
871 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
872 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
873 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
874
875 /*
876 * Clear beacon.
877 */
878 rt2800_clear_beacon_register(rt2x00dev,
879 HW_BEACON_OFFSET(entry->entry_idx));
880
881 /*
882 * Enabled beaconing again.
883 */
884 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
885 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
886}
887EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
888
848#ifdef CONFIG_RT2X00_LIB_DEBUGFS 889#ifdef CONFIG_RT2X00_LIB_DEBUGFS
849const struct rt2x00debug rt2800_rt2x00debug = { 890const struct rt2x00debug rt2800_rt2x00debug = {
850 .owner = THIS_MODULE, 891 .owner = THIS_MODULE,
@@ -1005,7 +1046,7 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
1005 1046
1006 memset(&wcid_entry, 0, sizeof(wcid_entry)); 1047 memset(&wcid_entry, 0, sizeof(wcid_entry));
1007 if (crypto->cmd == SET_KEY) 1048 if (crypto->cmd == SET_KEY)
1008 memcpy(&wcid_entry, crypto->address, ETH_ALEN); 1049 memcpy(wcid_entry.mac, crypto->address, ETH_ALEN);
1009 rt2800_register_multiwrite(rt2x00dev, offset, 1050 rt2800_register_multiwrite(rt2x00dev, offset,
1010 &wcid_entry, sizeof(wcid_entry)); 1051 &wcid_entry, sizeof(wcid_entry));
1011} 1052}
@@ -1060,27 +1101,44 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
1060} 1101}
1061EXPORT_SYMBOL_GPL(rt2800_config_shared_key); 1102EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
1062 1103
1104static inline int rt2800_find_pairwise_keyslot(struct rt2x00_dev *rt2x00dev)
1105{
1106 int idx;
1107 u32 offset, reg;
1108
1109 /*
1110 * Search for the first free pairwise key entry and return the
1111 * corresponding index.
1112 *
1113 * Make sure the WCID starts _after_ the last possible shared key
1114 * entry (>32).
1115 *
1116 * Since parts of the pairwise key table might be shared with
1117 * the beacon frame buffers 6 & 7 we should only write into the
1118 * first 222 entries.
1119 */
1120 for (idx = 33; idx <= 222; idx++) {
1121 offset = MAC_WCID_ATTR_ENTRY(idx);
1122 rt2800_register_read(rt2x00dev, offset, &reg);
1123 if (!reg)
1124 return idx;
1125 }
1126 return -1;
1127}
1128
1063int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, 1129int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
1064 struct rt2x00lib_crypto *crypto, 1130 struct rt2x00lib_crypto *crypto,
1065 struct ieee80211_key_conf *key) 1131 struct ieee80211_key_conf *key)
1066{ 1132{
1067 struct hw_key_entry key_entry; 1133 struct hw_key_entry key_entry;
1068 u32 offset; 1134 u32 offset;
1135 int idx;
1069 1136
1070 if (crypto->cmd == SET_KEY) { 1137 if (crypto->cmd == SET_KEY) {
1071 /* 1138 idx = rt2800_find_pairwise_keyslot(rt2x00dev);
1072 * 1 pairwise key is possible per AID, this means that the AID 1139 if (idx < 0)
1073 * equals our hw_key_idx. Make sure the WCID starts _after_ the
1074 * last possible shared key entry.
1075 *
1076 * Since parts of the pairwise key table might be shared with
1077 * the beacon frame buffers 6 & 7 we should only write into the
1078 * first 222 entries.
1079 */
1080 if (crypto->aid > (222 - 32))
1081 return -ENOSPC; 1140 return -ENOSPC;
1082 1141 key->hw_key_idx = idx;
1083 key->hw_key_idx = 32 + crypto->aid;
1084 1142
1085 memcpy(key_entry.key, crypto->key, 1143 memcpy(key_entry.key, crypto->key,
1086 sizeof(key_entry.key)); 1144 sizeof(key_entry.key));
@@ -1155,29 +1213,11 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1155 1213
1156 if (flags & CONFIG_UPDATE_TYPE) { 1214 if (flags & CONFIG_UPDATE_TYPE) {
1157 /* 1215 /*
1158 * Clear current synchronisation setup.
1159 */
1160 rt2800_clear_beacon(rt2x00dev,
1161 HW_BEACON_OFFSET(intf->beacon->entry_idx));
1162 /*
1163 * Enable synchronisation. 1216 * Enable synchronisation.
1164 */ 1217 */
1165 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 1218 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
1166 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
1167 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync); 1219 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
1168 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
1169 (conf->sync == TSF_SYNC_ADHOC ||
1170 conf->sync == TSF_SYNC_AP_NONE));
1171 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1220 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1172
1173 /*
1174 * Enable pre tbtt interrupt for beaconing modes
1175 */
1176 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
1177 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER,
1178 (conf->sync == TSF_SYNC_AP_NONE));
1179 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
1180
1181 } 1221 }
1182 1222
1183 if (flags & CONFIG_UPDATE_MAC) { 1223 if (flags & CONFIG_UPDATE_MAC) {
@@ -1361,10 +1401,32 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
1361} 1401}
1362EXPORT_SYMBOL_GPL(rt2800_config_erp); 1402EXPORT_SYMBOL_GPL(rt2800_config_erp);
1363 1403
1404static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
1405 enum antenna ant)
1406{
1407 u32 reg;
1408 u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0;
1409 u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1;
1410
1411 if (rt2x00_is_pci(rt2x00dev)) {
1412 rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
1413 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin);
1414 rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
1415 } else if (rt2x00_is_usb(rt2x00dev))
1416 rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
1417 eesk_pin, 0);
1418
1419 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
1420 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
1421 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
1422 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
1423}
1424
1364void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) 1425void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1365{ 1426{
1366 u8 r1; 1427 u8 r1;
1367 u8 r3; 1428 u8 r3;
1429 u16 eeprom;
1368 1430
1369 rt2800_bbp_read(rt2x00dev, 1, &r1); 1431 rt2800_bbp_read(rt2x00dev, 1, &r1);
1370 rt2800_bbp_read(rt2x00dev, 3, &r3); 1432 rt2800_bbp_read(rt2x00dev, 3, &r3);
@@ -1372,7 +1434,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1372 /* 1434 /*
1373 * Configure the TX antenna. 1435 * Configure the TX antenna.
1374 */ 1436 */
1375 switch ((int)ant->tx) { 1437 switch (ant->tx_chain_num) {
1376 case 1: 1438 case 1:
1377 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); 1439 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
1378 break; 1440 break;
@@ -1387,8 +1449,18 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1387 /* 1449 /*
1388 * Configure the RX antenna. 1450 * Configure the RX antenna.
1389 */ 1451 */
1390 switch ((int)ant->rx) { 1452 switch (ant->rx_chain_num) {
1391 case 1: 1453 case 1:
1454 if (rt2x00_rt(rt2x00dev, RT3070) ||
1455 rt2x00_rt(rt2x00dev, RT3090) ||
1456 rt2x00_rt(rt2x00dev, RT3390)) {
1457 rt2x00_eeprom_read(rt2x00dev,
1458 EEPROM_NIC_CONF1, &eeprom);
1459 if (rt2x00_get_field16(eeprom,
1460 EEPROM_NIC_CONF1_ANT_DIVERSITY))
1461 rt2800_set_ant_diversity(rt2x00dev,
1462 rt2x00dev->default_ant.rx);
1463 }
1392 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0); 1464 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
1393 break; 1465 break;
1394 case 2: 1466 case 2:
@@ -1434,13 +1506,13 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
1434{ 1506{
1435 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 1507 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
1436 1508
1437 if (rt2x00dev->default_ant.tx == 1) 1509 if (rt2x00dev->default_ant.tx_chain_num == 1)
1438 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1); 1510 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
1439 1511
1440 if (rt2x00dev->default_ant.rx == 1) { 1512 if (rt2x00dev->default_ant.rx_chain_num == 1) {
1441 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1); 1513 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
1442 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); 1514 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
1443 } else if (rt2x00dev->default_ant.rx == 2) 1515 } else if (rt2x00dev->default_ant.rx_chain_num == 2)
1444 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); 1516 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
1445 1517
1446 if (rf->channel > 14) { 1518 if (rf->channel > 14) {
@@ -1526,6 +1598,105 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
1526 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 1598 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
1527} 1599}
1528 1600
1601
1602#define RT5390_POWER_BOUND 0x27
1603#define RT5390_FREQ_OFFSET_BOUND 0x5f
1604
1605static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1606 struct ieee80211_conf *conf,
1607 struct rf_channel *rf,
1608 struct channel_info *info)
1609{
1610 u8 rfcsr;
1611 u16 eeprom;
1612
1613 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
1614 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
1615 rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
1616 rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
1617 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
1618
1619 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
1620 if (info->default_power1 > RT5390_POWER_BOUND)
1621 rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
1622 else
1623 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
1624 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
1625
1626 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
1627 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
1628 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
1629 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
1630 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
1631 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
1632
1633 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
1634 if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
1635 rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
1636 RT5390_FREQ_OFFSET_BOUND);
1637 else
1638 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
1639 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
1640
1641 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
1642 if (rf->channel <= 14) {
1643 int idx = rf->channel-1;
1644
1645 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
1646 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
1647 /* r55/r59 value array of channel 1~14 */
1648 static const char r55_bt_rev[] = {0x83, 0x83,
1649 0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
1650 0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
1651 static const char r59_bt_rev[] = {0x0e, 0x0e,
1652 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
1653 0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
1654
1655 rt2800_rfcsr_write(rt2x00dev, 55,
1656 r55_bt_rev[idx]);
1657 rt2800_rfcsr_write(rt2x00dev, 59,
1658 r59_bt_rev[idx]);
1659 } else {
1660 static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
1661 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
1662 0x88, 0x88, 0x86, 0x85, 0x84};
1663
1664 rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
1665 }
1666 } else {
1667 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
1668 static const char r55_nonbt_rev[] = {0x23, 0x23,
1669 0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
1670 0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
1671 static const char r59_nonbt_rev[] = {0x07, 0x07,
1672 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
1673 0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
1674
1675 rt2800_rfcsr_write(rt2x00dev, 55,
1676 r55_nonbt_rev[idx]);
1677 rt2800_rfcsr_write(rt2x00dev, 59,
1678 r59_nonbt_rev[idx]);
1679 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
1680 static const char r59_non_bt[] = {0x8f, 0x8f,
1681 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
1682 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
1683
1684 rt2800_rfcsr_write(rt2x00dev, 59,
1685 r59_non_bt[idx]);
1686 }
1687 }
1688 }
1689
1690 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
1691 rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
1692 rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
1693 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
1694
1695 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
1696 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
1697 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
1698}
1699
1529static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, 1700static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1530 struct ieee80211_conf *conf, 1701 struct ieee80211_conf *conf,
1531 struct rf_channel *rf, 1702 struct rf_channel *rf,
@@ -1550,6 +1721,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1550 rt2x00_rf(rt2x00dev, RF3052) || 1721 rt2x00_rf(rt2x00dev, RF3052) ||
1551 rt2x00_rf(rt2x00dev, RF3320)) 1722 rt2x00_rf(rt2x00dev, RF3320))
1552 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); 1723 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
1724 else if (rt2x00_rf(rt2x00dev, RF5390))
1725 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
1553 else 1726 else
1554 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 1727 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
1555 1728
@@ -1562,12 +1735,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1562 rt2800_bbp_write(rt2x00dev, 86, 0); 1735 rt2800_bbp_write(rt2x00dev, 86, 0);
1563 1736
1564 if (rf->channel <= 14) { 1737 if (rf->channel <= 14) {
1565 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { 1738 if (!rt2x00_rt(rt2x00dev, RT5390)) {
1566 rt2800_bbp_write(rt2x00dev, 82, 0x62); 1739 if (test_bit(CONFIG_EXTERNAL_LNA_BG,
1567 rt2800_bbp_write(rt2x00dev, 75, 0x46); 1740 &rt2x00dev->flags)) {
1568 } else { 1741 rt2800_bbp_write(rt2x00dev, 82, 0x62);
1569 rt2800_bbp_write(rt2x00dev, 82, 0x84); 1742 rt2800_bbp_write(rt2x00dev, 75, 0x46);
1570 rt2800_bbp_write(rt2x00dev, 75, 0x50); 1743 } else {
1744 rt2800_bbp_write(rt2x00dev, 82, 0x84);
1745 rt2800_bbp_write(rt2x00dev, 75, 0x50);
1746 }
1571 } 1747 }
1572 } else { 1748 } else {
1573 rt2800_bbp_write(rt2x00dev, 82, 0xf2); 1749 rt2800_bbp_write(rt2x00dev, 82, 0xf2);
@@ -1587,13 +1763,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1587 tx_pin = 0; 1763 tx_pin = 0;
1588 1764
1589 /* Turn on unused PA or LNA when not using 1T or 1R */ 1765 /* Turn on unused PA or LNA when not using 1T or 1R */
1590 if (rt2x00dev->default_ant.tx != 1) { 1766 if (rt2x00dev->default_ant.tx_chain_num == 2) {
1591 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1); 1767 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
1592 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1); 1768 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
1593 } 1769 }
1594 1770
1595 /* Turn on unused PA or LNA when not using 1T or 1R */ 1771 /* Turn on unused PA or LNA when not using 1T or 1R */
1596 if (rt2x00dev->default_ant.rx != 1) { 1772 if (rt2x00dev->default_ant.rx_chain_num == 2) {
1597 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); 1773 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
1598 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); 1774 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
1599 } 1775 }
@@ -1637,30 +1813,116 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1637 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg); 1813 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
1638} 1814}
1639 1815
1816static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
1817 enum ieee80211_band band)
1818{
1819 u16 eeprom;
1820 u8 comp_en;
1821 u8 comp_type;
1822 int comp_value;
1823
1824 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
1825
1826 if (eeprom == 0xffff)
1827 return 0;
1828
1829 if (band == IEEE80211_BAND_2GHZ) {
1830 comp_en = rt2x00_get_field16(eeprom,
1831 EEPROM_TXPOWER_DELTA_ENABLE_2G);
1832 if (comp_en) {
1833 comp_type = rt2x00_get_field16(eeprom,
1834 EEPROM_TXPOWER_DELTA_TYPE_2G);
1835 comp_value = rt2x00_get_field16(eeprom,
1836 EEPROM_TXPOWER_DELTA_VALUE_2G);
1837 if (!comp_type)
1838 comp_value = -comp_value;
1839 }
1840 } else {
1841 comp_en = rt2x00_get_field16(eeprom,
1842 EEPROM_TXPOWER_DELTA_ENABLE_5G);
1843 if (comp_en) {
1844 comp_type = rt2x00_get_field16(eeprom,
1845 EEPROM_TXPOWER_DELTA_TYPE_5G);
1846 comp_value = rt2x00_get_field16(eeprom,
1847 EEPROM_TXPOWER_DELTA_VALUE_5G);
1848 if (!comp_type)
1849 comp_value = -comp_value;
1850 }
1851 }
1852
1853 return comp_value;
1854}
1855
1856static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev,
1857 int is_rate_b,
1858 enum ieee80211_band band,
1859 int power_level,
1860 u8 txpower)
1861{
1862 u32 reg;
1863 u16 eeprom;
1864 u8 criterion;
1865 u8 eirp_txpower;
1866 u8 eirp_txpower_criterion;
1867 u8 reg_limit;
1868 int bw_comp = 0;
1869
1870 if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
1871 return txpower;
1872
1873 if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
1874 bw_comp = rt2800_get_txpower_bw_comp(rt2x00dev, band);
1875
1876 if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) {
1877 /*
1878 * Check if eirp txpower exceed txpower_limit.
1879 * We use OFDM 6M as criterion and its eirp txpower
1880 * is stored at EEPROM_EIRP_MAX_TX_POWER.
1881 * .11b data rate need add additional 4dbm
1882 * when calculating eirp txpower.
1883 */
1884 rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
1885 criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS);
1886
1887 rt2x00_eeprom_read(rt2x00dev,
1888 EEPROM_EIRP_MAX_TX_POWER, &eeprom);
1889
1890 if (band == IEEE80211_BAND_2GHZ)
1891 eirp_txpower_criterion = rt2x00_get_field16(eeprom,
1892 EEPROM_EIRP_MAX_TX_POWER_2GHZ);
1893 else
1894 eirp_txpower_criterion = rt2x00_get_field16(eeprom,
1895 EEPROM_EIRP_MAX_TX_POWER_5GHZ);
1896
1897 eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
1898 (is_rate_b ? 4 : 0) + bw_comp;
1899
1900 reg_limit = (eirp_txpower > power_level) ?
1901 (eirp_txpower - power_level) : 0;
1902 } else
1903 reg_limit = 0;
1904
1905 return txpower + bw_comp - reg_limit;
1906}
1907
1640static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, 1908static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1641 const int max_txpower) 1909 struct ieee80211_conf *conf)
1642{ 1910{
1643 u8 txpower; 1911 u8 txpower;
1644 u8 max_value = (u8)max_txpower;
1645 u16 eeprom; 1912 u16 eeprom;
1646 int i; 1913 int i, is_rate_b;
1647 u32 reg; 1914 u32 reg;
1648 u8 r1; 1915 u8 r1;
1649 u32 offset; 1916 u32 offset;
1917 enum ieee80211_band band = conf->channel->band;
1918 int power_level = conf->power_level;
1650 1919
1651 /* 1920 /*
1652 * set to normal tx power mode: +/- 0dBm 1921 * set to normal bbp tx power control mode: +/- 0dBm
1653 */ 1922 */
1654 rt2800_bbp_read(rt2x00dev, 1, &r1); 1923 rt2800_bbp_read(rt2x00dev, 1, &r1);
1655 rt2x00_set_field8(&r1, BBP1_TX_POWER, 0); 1924 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0);
1656 rt2800_bbp_write(rt2x00dev, 1, r1); 1925 rt2800_bbp_write(rt2x00dev, 1, r1);
1657
1658 /*
1659 * The eeprom contains the tx power values for each rate. These
1660 * values map to 100% tx power. Each 16bit word contains four tx
1661 * power values and the order is the same as used in the TX_PWR_CFG
1662 * registers.
1663 */
1664 offset = TX_PWR_CFG_0; 1926 offset = TX_PWR_CFG_0;
1665 1927
1666 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { 1928 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
@@ -1674,73 +1936,99 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1674 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i, 1936 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
1675 &eeprom); 1937 &eeprom);
1676 1938
1677 /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS, 1939 is_rate_b = i ? 0 : 1;
1940 /*
1941 * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
1678 * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12, 1942 * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12,
1679 * TX_PWR_CFG_4: unknown */ 1943 * TX_PWR_CFG_4: unknown
1944 */
1680 txpower = rt2x00_get_field16(eeprom, 1945 txpower = rt2x00_get_field16(eeprom,
1681 EEPROM_TXPOWER_BYRATE_RATE0); 1946 EEPROM_TXPOWER_BYRATE_RATE0);
1682 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, 1947 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1683 min(txpower, max_value)); 1948 power_level, txpower);
1949 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower);
1684 1950
1685 /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS, 1951 /*
1952 * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
1686 * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13, 1953 * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13,
1687 * TX_PWR_CFG_4: unknown */ 1954 * TX_PWR_CFG_4: unknown
1955 */
1688 txpower = rt2x00_get_field16(eeprom, 1956 txpower = rt2x00_get_field16(eeprom,
1689 EEPROM_TXPOWER_BYRATE_RATE1); 1957 EEPROM_TXPOWER_BYRATE_RATE1);
1690 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, 1958 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1691 min(txpower, max_value)); 1959 power_level, txpower);
1960 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower);
1692 1961
1693 /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS, 1962 /*
1963 * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS,
1694 * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14, 1964 * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14,
1695 * TX_PWR_CFG_4: unknown */ 1965 * TX_PWR_CFG_4: unknown
1966 */
1696 txpower = rt2x00_get_field16(eeprom, 1967 txpower = rt2x00_get_field16(eeprom,
1697 EEPROM_TXPOWER_BYRATE_RATE2); 1968 EEPROM_TXPOWER_BYRATE_RATE2);
1698 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, 1969 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1699 min(txpower, max_value)); 1970 power_level, txpower);
1971 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower);
1700 1972
1701 /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS, 1973 /*
1974 * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
1702 * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15, 1975 * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15,
1703 * TX_PWR_CFG_4: unknown */ 1976 * TX_PWR_CFG_4: unknown
1977 */
1704 txpower = rt2x00_get_field16(eeprom, 1978 txpower = rt2x00_get_field16(eeprom,
1705 EEPROM_TXPOWER_BYRATE_RATE3); 1979 EEPROM_TXPOWER_BYRATE_RATE3);
1706 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, 1980 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1707 min(txpower, max_value)); 1981 power_level, txpower);
1982 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
1708 1983
1709 /* read the next four txpower values */ 1984 /* read the next four txpower values */
1710 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1, 1985 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
1711 &eeprom); 1986 &eeprom);
1712 1987
1713 /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0, 1988 is_rate_b = 0;
1989 /*
1990 * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
1714 * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown, 1991 * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown,
1715 * TX_PWR_CFG_4: unknown */ 1992 * TX_PWR_CFG_4: unknown
1993 */
1716 txpower = rt2x00_get_field16(eeprom, 1994 txpower = rt2x00_get_field16(eeprom,
1717 EEPROM_TXPOWER_BYRATE_RATE0); 1995 EEPROM_TXPOWER_BYRATE_RATE0);
1718 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, 1996 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1719 min(txpower, max_value)); 1997 power_level, txpower);
1998 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower);
1720 1999
1721 /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1, 2000 /*
2001 * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
1722 * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown, 2002 * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown,
1723 * TX_PWR_CFG_4: unknown */ 2003 * TX_PWR_CFG_4: unknown
2004 */
1724 txpower = rt2x00_get_field16(eeprom, 2005 txpower = rt2x00_get_field16(eeprom,
1725 EEPROM_TXPOWER_BYRATE_RATE1); 2006 EEPROM_TXPOWER_BYRATE_RATE1);
1726 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, 2007 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1727 min(txpower, max_value)); 2008 power_level, txpower);
2009 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower);
1728 2010
1729 /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2, 2011 /*
2012 * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
1730 * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown, 2013 * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown,
1731 * TX_PWR_CFG_4: unknown */ 2014 * TX_PWR_CFG_4: unknown
2015 */
1732 txpower = rt2x00_get_field16(eeprom, 2016 txpower = rt2x00_get_field16(eeprom,
1733 EEPROM_TXPOWER_BYRATE_RATE2); 2017 EEPROM_TXPOWER_BYRATE_RATE2);
1734 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, 2018 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1735 min(txpower, max_value)); 2019 power_level, txpower);
2020 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower);
1736 2021
1737 /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3, 2022 /*
2023 * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
1738 * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown, 2024 * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown,
1739 * TX_PWR_CFG_4: unknown */ 2025 * TX_PWR_CFG_4: unknown
2026 */
1740 txpower = rt2x00_get_field16(eeprom, 2027 txpower = rt2x00_get_field16(eeprom,
1741 EEPROM_TXPOWER_BYRATE_RATE3); 2028 EEPROM_TXPOWER_BYRATE_RATE3);
1742 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, 2029 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1743 min(txpower, max_value)); 2030 power_level, txpower);
2031 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower);
1744 2032
1745 rt2800_register_write(rt2x00dev, offset, reg); 2033 rt2800_register_write(rt2x00dev, offset, reg);
1746 2034
@@ -1799,11 +2087,13 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
1799 /* Always recalculate LNA gain before changing configuration */ 2087 /* Always recalculate LNA gain before changing configuration */
1800 rt2800_config_lna_gain(rt2x00dev, libconf); 2088 rt2800_config_lna_gain(rt2x00dev, libconf);
1801 2089
1802 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) 2090 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
1803 rt2800_config_channel(rt2x00dev, libconf->conf, 2091 rt2800_config_channel(rt2x00dev, libconf->conf,
1804 &libconf->rf, &libconf->channel); 2092 &libconf->rf, &libconf->channel);
2093 rt2800_config_txpower(rt2x00dev, libconf->conf);
2094 }
1805 if (flags & IEEE80211_CONF_CHANGE_POWER) 2095 if (flags & IEEE80211_CONF_CHANGE_POWER)
1806 rt2800_config_txpower(rt2x00dev, libconf->conf->power_level); 2096 rt2800_config_txpower(rt2x00dev, libconf->conf);
1807 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) 2097 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
1808 rt2800_config_retry_limit(rt2x00dev, libconf); 2098 rt2800_config_retry_limit(rt2x00dev, libconf);
1809 if (flags & IEEE80211_CONF_CHANGE_PS) 2099 if (flags & IEEE80211_CONF_CHANGE_PS)
@@ -1832,7 +2122,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1832 if (rt2x00_rt(rt2x00dev, RT3070) || 2122 if (rt2x00_rt(rt2x00dev, RT3070) ||
1833 rt2x00_rt(rt2x00dev, RT3071) || 2123 rt2x00_rt(rt2x00dev, RT3071) ||
1834 rt2x00_rt(rt2x00dev, RT3090) || 2124 rt2x00_rt(rt2x00dev, RT3090) ||
1835 rt2x00_rt(rt2x00dev, RT3390)) 2125 rt2x00_rt(rt2x00dev, RT3390) ||
2126 rt2x00_rt(rt2x00dev, RT5390))
1836 return 0x1c + (2 * rt2x00dev->lna_gain); 2127 return 0x1c + (2 * rt2x00dev->lna_gain);
1837 else 2128 else
1838 return 0x2e + rt2x00dev->lna_gain; 2129 return 0x2e + rt2x00dev->lna_gain;
@@ -1964,6 +2255,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1964 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 2255 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1965 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 2256 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1966 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f); 2257 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
2258 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
2259 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
2260 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
2261 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1967 } else { 2262 } else {
1968 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); 2263 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
1969 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 2264 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2032,7 +2327,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2032 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg); 2327 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
2033 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3); 2328 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
2034 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0); 2329 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
2035 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1); 2330 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1);
2036 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2331 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2037 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2332 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2038 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2333 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2045,7 +2340,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2045 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg); 2340 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
2046 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3); 2341 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
2047 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0); 2342 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
2048 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1); 2343 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1);
2049 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2344 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2050 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2345 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2051 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2346 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2058,7 +2353,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2058 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg); 2353 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
2059 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004); 2354 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
2060 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0); 2355 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
2061 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1); 2356 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
2062 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2357 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2063 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2358 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2064 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2359 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2071,7 +2366,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2071 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg); 2366 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
2072 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084); 2367 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
2073 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0); 2368 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
2074 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1); 2369 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
2075 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2370 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2076 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2371 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2077 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2372 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2084,7 +2379,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2084 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg); 2379 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
2085 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004); 2380 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
2086 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0); 2381 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
2087 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1); 2382 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
2088 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2383 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2089 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2384 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2090 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2385 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2097,7 +2392,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2097 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg); 2392 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
2098 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084); 2393 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
2099 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0); 2394 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
2100 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1); 2395 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
2101 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2396 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2102 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2397 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2103 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2398 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2180,26 +2475,30 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2180 rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i), 2475 rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
2181 wcid, sizeof(wcid)); 2476 wcid, sizeof(wcid));
2182 2477
2183 rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1); 2478 rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 0);
2184 rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); 2479 rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
2185 } 2480 }
2186 2481
2187 /* 2482 /*
2188 * Clear all beacons 2483 * Clear all beacons
2189 */ 2484 */
2190 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0); 2485 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
2191 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1); 2486 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
2192 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2); 2487 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
2193 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3); 2488 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
2194 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4); 2489 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
2195 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5); 2490 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
2196 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6); 2491 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
2197 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7); 2492 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
2198 2493
2199 if (rt2x00_is_usb(rt2x00dev)) { 2494 if (rt2x00_is_usb(rt2x00dev)) {
2200 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg); 2495 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
2201 rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30); 2496 rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
2202 rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); 2497 rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
2498 } else if (rt2x00_is_pcie(rt2x00dev)) {
2499 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
2500 rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
2501 rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
2203 } 2502 }
2204 2503
2205 rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg); 2504 rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -2335,15 +2634,31 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2335 rt2800_wait_bbp_ready(rt2x00dev))) 2634 rt2800_wait_bbp_ready(rt2x00dev)))
2336 return -EACCES; 2635 return -EACCES;
2337 2636
2338 if (rt2800_is_305x_soc(rt2x00dev)) 2637 if (rt2x00_rt(rt2x00dev, RT5390)) {
2638 rt2800_bbp_read(rt2x00dev, 4, &value);
2639 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
2640 rt2800_bbp_write(rt2x00dev, 4, value);
2641 }
2642
2643 if (rt2800_is_305x_soc(rt2x00dev) ||
2644 rt2x00_rt(rt2x00dev, RT5390))
2339 rt2800_bbp_write(rt2x00dev, 31, 0x08); 2645 rt2800_bbp_write(rt2x00dev, 31, 0x08);
2340 2646
2341 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 2647 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
2342 rt2800_bbp_write(rt2x00dev, 66, 0x38); 2648 rt2800_bbp_write(rt2x00dev, 66, 0x38);
2343 2649
2650 if (rt2x00_rt(rt2x00dev, RT5390))
2651 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
2652
2344 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { 2653 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
2345 rt2800_bbp_write(rt2x00dev, 69, 0x16); 2654 rt2800_bbp_write(rt2x00dev, 69, 0x16);
2346 rt2800_bbp_write(rt2x00dev, 73, 0x12); 2655 rt2800_bbp_write(rt2x00dev, 73, 0x12);
2656 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
2657 rt2800_bbp_write(rt2x00dev, 69, 0x12);
2658 rt2800_bbp_write(rt2x00dev, 73, 0x13);
2659 rt2800_bbp_write(rt2x00dev, 75, 0x46);
2660 rt2800_bbp_write(rt2x00dev, 76, 0x28);
2661 rt2800_bbp_write(rt2x00dev, 77, 0x59);
2347 } else { 2662 } else {
2348 rt2800_bbp_write(rt2x00dev, 69, 0x12); 2663 rt2800_bbp_write(rt2x00dev, 69, 0x12);
2349 rt2800_bbp_write(rt2x00dev, 73, 0x10); 2664 rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -2354,7 +2669,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2354 if (rt2x00_rt(rt2x00dev, RT3070) || 2669 if (rt2x00_rt(rt2x00dev, RT3070) ||
2355 rt2x00_rt(rt2x00dev, RT3071) || 2670 rt2x00_rt(rt2x00dev, RT3071) ||
2356 rt2x00_rt(rt2x00dev, RT3090) || 2671 rt2x00_rt(rt2x00dev, RT3090) ||
2357 rt2x00_rt(rt2x00dev, RT3390)) { 2672 rt2x00_rt(rt2x00dev, RT3390) ||
2673 rt2x00_rt(rt2x00dev, RT5390)) {
2358 rt2800_bbp_write(rt2x00dev, 79, 0x13); 2674 rt2800_bbp_write(rt2x00dev, 79, 0x13);
2359 rt2800_bbp_write(rt2x00dev, 80, 0x05); 2675 rt2800_bbp_write(rt2x00dev, 80, 0x05);
2360 rt2800_bbp_write(rt2x00dev, 81, 0x33); 2676 rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -2366,35 +2682,62 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2366 } 2682 }
2367 2683
2368 rt2800_bbp_write(rt2x00dev, 82, 0x62); 2684 rt2800_bbp_write(rt2x00dev, 82, 0x62);
2369 rt2800_bbp_write(rt2x00dev, 83, 0x6a); 2685 if (rt2x00_rt(rt2x00dev, RT5390))
2686 rt2800_bbp_write(rt2x00dev, 83, 0x7a);
2687 else
2688 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
2370 2689
2371 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) 2690 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
2372 rt2800_bbp_write(rt2x00dev, 84, 0x19); 2691 rt2800_bbp_write(rt2x00dev, 84, 0x19);
2692 else if (rt2x00_rt(rt2x00dev, RT5390))
2693 rt2800_bbp_write(rt2x00dev, 84, 0x9a);
2373 else 2694 else
2374 rt2800_bbp_write(rt2x00dev, 84, 0x99); 2695 rt2800_bbp_write(rt2x00dev, 84, 0x99);
2375 2696
2376 rt2800_bbp_write(rt2x00dev, 86, 0x00); 2697 if (rt2x00_rt(rt2x00dev, RT5390))
2698 rt2800_bbp_write(rt2x00dev, 86, 0x38);
2699 else
2700 rt2800_bbp_write(rt2x00dev, 86, 0x00);
2701
2377 rt2800_bbp_write(rt2x00dev, 91, 0x04); 2702 rt2800_bbp_write(rt2x00dev, 91, 0x04);
2378 rt2800_bbp_write(rt2x00dev, 92, 0x00); 2703
2704 if (rt2x00_rt(rt2x00dev, RT5390))
2705 rt2800_bbp_write(rt2x00dev, 92, 0x02);
2706 else
2707 rt2800_bbp_write(rt2x00dev, 92, 0x00);
2379 2708
2380 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || 2709 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
2381 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || 2710 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
2382 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || 2711 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
2383 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || 2712 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
2713 rt2x00_rt(rt2x00dev, RT5390) ||
2384 rt2800_is_305x_soc(rt2x00dev)) 2714 rt2800_is_305x_soc(rt2x00dev))
2385 rt2800_bbp_write(rt2x00dev, 103, 0xc0); 2715 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
2386 else 2716 else
2387 rt2800_bbp_write(rt2x00dev, 103, 0x00); 2717 rt2800_bbp_write(rt2x00dev, 103, 0x00);
2388 2718
2719 if (rt2x00_rt(rt2x00dev, RT5390))
2720 rt2800_bbp_write(rt2x00dev, 104, 0x92);
2721
2389 if (rt2800_is_305x_soc(rt2x00dev)) 2722 if (rt2800_is_305x_soc(rt2x00dev))
2390 rt2800_bbp_write(rt2x00dev, 105, 0x01); 2723 rt2800_bbp_write(rt2x00dev, 105, 0x01);
2724 else if (rt2x00_rt(rt2x00dev, RT5390))
2725 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
2391 else 2726 else
2392 rt2800_bbp_write(rt2x00dev, 105, 0x05); 2727 rt2800_bbp_write(rt2x00dev, 105, 0x05);
2393 rt2800_bbp_write(rt2x00dev, 106, 0x35); 2728
2729 if (rt2x00_rt(rt2x00dev, RT5390))
2730 rt2800_bbp_write(rt2x00dev, 106, 0x03);
2731 else
2732 rt2800_bbp_write(rt2x00dev, 106, 0x35);
2733
2734 if (rt2x00_rt(rt2x00dev, RT5390))
2735 rt2800_bbp_write(rt2x00dev, 128, 0x12);
2394 2736
2395 if (rt2x00_rt(rt2x00dev, RT3071) || 2737 if (rt2x00_rt(rt2x00dev, RT3071) ||
2396 rt2x00_rt(rt2x00dev, RT3090) || 2738 rt2x00_rt(rt2x00dev, RT3090) ||
2397 rt2x00_rt(rt2x00dev, RT3390)) { 2739 rt2x00_rt(rt2x00dev, RT3390) ||
2740 rt2x00_rt(rt2x00dev, RT5390)) {
2398 rt2800_bbp_read(rt2x00dev, 138, &value); 2741 rt2800_bbp_read(rt2x00dev, 138, &value);
2399 2742
2400 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 2743 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
@@ -2406,6 +2749,42 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2406 rt2800_bbp_write(rt2x00dev, 138, value); 2749 rt2800_bbp_write(rt2x00dev, 138, value);
2407 } 2750 }
2408 2751
2752 if (rt2x00_rt(rt2x00dev, RT5390)) {
2753 int ant, div_mode;
2754
2755 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
2756 div_mode = rt2x00_get_field16(eeprom,
2757 EEPROM_NIC_CONF1_ANT_DIVERSITY);
2758 ant = (div_mode == 3) ? 1 : 0;
2759
2760 /* check if this is a Bluetooth combo card */
2761 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
2762 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
2763 u32 reg;
2764
2765 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
2766 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
2767 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
2768 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
2769 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
2770 if (ant == 0)
2771 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
2772 else if (ant == 1)
2773 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
2774 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
2775 }
2776
2777 rt2800_bbp_read(rt2x00dev, 152, &value);
2778 if (ant == 0)
2779 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
2780 else
2781 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
2782 rt2800_bbp_write(rt2x00dev, 152, value);
2783
2784 /* Init frequency calibration */
2785 rt2800_bbp_write(rt2x00dev, 142, 1);
2786 rt2800_bbp_write(rt2x00dev, 143, 57);
2787 }
2409 2788
2410 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 2789 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
2411 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 2790 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -2436,6 +2815,10 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
2436 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40); 2815 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
2437 rt2800_bbp_write(rt2x00dev, 4, bbp); 2816 rt2800_bbp_write(rt2x00dev, 4, bbp);
2438 2817
2818 rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
2819 rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
2820 rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
2821
2439 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); 2822 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
2440 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1); 2823 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
2441 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); 2824 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -2491,18 +2874,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2491 !rt2x00_rt(rt2x00dev, RT3071) && 2874 !rt2x00_rt(rt2x00dev, RT3071) &&
2492 !rt2x00_rt(rt2x00dev, RT3090) && 2875 !rt2x00_rt(rt2x00dev, RT3090) &&
2493 !rt2x00_rt(rt2x00dev, RT3390) && 2876 !rt2x00_rt(rt2x00dev, RT3390) &&
2877 !rt2x00_rt(rt2x00dev, RT5390) &&
2494 !rt2800_is_305x_soc(rt2x00dev)) 2878 !rt2800_is_305x_soc(rt2x00dev))
2495 return 0; 2879 return 0;
2496 2880
2497 /* 2881 /*
2498 * Init RF calibration. 2882 * Init RF calibration.
2499 */ 2883 */
2500 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 2884 if (rt2x00_rt(rt2x00dev, RT5390)) {
2501 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 2885 rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
2502 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 2886 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
2503 msleep(1); 2887 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
2504 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); 2888 msleep(1);
2505 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 2889 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
2890 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
2891 } else {
2892 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2893 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2894 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2895 msleep(1);
2896 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
2897 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2898 }
2506 2899
2507 if (rt2x00_rt(rt2x00dev, RT3070) || 2900 if (rt2x00_rt(rt2x00dev, RT3070) ||
2508 rt2x00_rt(rt2x00dev, RT3071) || 2901 rt2x00_rt(rt2x00dev, RT3071) ||
@@ -2510,7 +2903,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2510 rt2800_rfcsr_write(rt2x00dev, 4, 0x40); 2903 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
2511 rt2800_rfcsr_write(rt2x00dev, 5, 0x03); 2904 rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
2512 rt2800_rfcsr_write(rt2x00dev, 6, 0x02); 2905 rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
2513 rt2800_rfcsr_write(rt2x00dev, 7, 0x70); 2906 rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
2514 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); 2907 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
2515 rt2800_rfcsr_write(rt2x00dev, 10, 0x41); 2908 rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
2516 rt2800_rfcsr_write(rt2x00dev, 11, 0x21); 2909 rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
@@ -2593,6 +2986,87 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2593 rt2800_rfcsr_write(rt2x00dev, 30, 0x00); 2986 rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
2594 rt2800_rfcsr_write(rt2x00dev, 31, 0x00); 2987 rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
2595 return 0; 2988 return 0;
2989 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
2990 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
2991 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
2992 rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
2993 rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
2994 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
2995 rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
2996 else
2997 rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
2998 rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
2999 rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
3000 rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
3001 rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
3002 rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
3003 rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
3004 rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
3005 rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
3006 rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
3007 rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
3008
3009 rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
3010 rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
3011 rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
3012 rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
3013 rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
3014 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3015 rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
3016 else
3017 rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
3018 rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
3019 rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
3020 rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
3021 rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
3022
3023 rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
3024 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
3025 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
3026 rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
3027 rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
3028 rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
3029 rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
3030 rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
3031 rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
3032 rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
3033
3034 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3035 rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
3036 else
3037 rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
3038 rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
3039 rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
3040 rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
3041 rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
3042 rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
3043 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3044 rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
3045 else
3046 rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
3047 rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
3048 rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
3049 rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
3050
3051 rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
3052 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3053 rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
3054 else
3055 rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
3056 rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
3057 rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
3058 rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
3059 rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
3060 rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
3061 rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
3062
3063 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
3064 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3065 rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
3066 else
3067 rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
3068 rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
3069 rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
2596 } 3070 }
2597 3071
2598 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { 3072 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -2602,12 +3076,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2602 rt2800_register_write(rt2x00dev, LDO_CFG0, reg); 3076 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
2603 } else if (rt2x00_rt(rt2x00dev, RT3071) || 3077 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
2604 rt2x00_rt(rt2x00dev, RT3090)) { 3078 rt2x00_rt(rt2x00dev, RT3090)) {
3079 rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
3080
2605 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr); 3081 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
2606 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1); 3082 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
2607 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); 3083 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
2608 3084
2609 rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
2610
2611 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg); 3085 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
2612 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); 3086 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
2613 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3087 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2619,6 +3093,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2619 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0); 3093 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
2620 } 3094 }
2621 rt2800_register_write(rt2x00dev, LDO_CFG0, reg); 3095 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
3096
3097 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
3098 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
3099 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
2622 } else if (rt2x00_rt(rt2x00dev, RT3390)) { 3100 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
2623 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg); 3101 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
2624 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0); 3102 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
@@ -2642,21 +3120,23 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2642 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15); 3120 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
2643 } 3121 }
2644 3122
2645 /* 3123 if (!rt2x00_rt(rt2x00dev, RT5390)) {
2646 * Set back to initial state 3124 /*
2647 */ 3125 * Set back to initial state
2648 rt2800_bbp_write(rt2x00dev, 24, 0); 3126 */
3127 rt2800_bbp_write(rt2x00dev, 24, 0);
2649 3128
2650 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); 3129 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
2651 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0); 3130 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
2652 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); 3131 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
2653 3132
2654 /* 3133 /*
2655 * set BBP back to BW20 3134 * Set BBP back to BW20
2656 */ 3135 */
2657 rt2800_bbp_read(rt2x00dev, 4, &bbp); 3136 rt2800_bbp_read(rt2x00dev, 4, &bbp);
2658 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); 3137 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
2659 rt2800_bbp_write(rt2x00dev, 4, bbp); 3138 rt2800_bbp_write(rt2x00dev, 4, bbp);
3139 }
2660 3140
2661 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || 3141 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
2662 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3142 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2668,24 +3148,29 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2668 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1); 3148 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
2669 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg); 3149 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
2670 3150
2671 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 3151 if (!rt2x00_rt(rt2x00dev, RT5390)) {
2672 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); 3152 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
2673 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3153 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
2674 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 3154 if (rt2x00_rt(rt2x00dev, RT3070) ||
2675 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 3155 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
2676 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) 3156 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
2677 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); 3157 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
2678 } 3158 if (!test_bit(CONFIG_EXTERNAL_LNA_BG,
2679 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); 3159 &rt2x00dev->flags))
2680 if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1) 3160 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
2681 rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, 3161 }
2682 rt2x00_get_field16(eeprom, 3162 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
2683 EEPROM_TXMIXER_GAIN_BG_VAL)); 3163 if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
2684 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 3164 rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
3165 rt2x00_get_field16(eeprom,
3166 EEPROM_TXMIXER_GAIN_BG_VAL));
3167 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
3168 }
2685 3169
2686 if (rt2x00_rt(rt2x00dev, RT3090)) { 3170 if (rt2x00_rt(rt2x00dev, RT3090)) {
2687 rt2800_bbp_read(rt2x00dev, 138, &bbp); 3171 rt2800_bbp_read(rt2x00dev, 138, &bbp);
2688 3172
3173 /* Turn off unused DAC1 and ADC1 to reduce power consumption */
2689 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 3174 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
2690 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) 3175 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
2691 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0); 3176 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
@@ -2719,10 +3204,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2719 rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); 3204 rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
2720 } 3205 }
2721 3206
2722 if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) { 3207 if (rt2x00_rt(rt2x00dev, RT3070)) {
2723 rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr); 3208 rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
2724 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || 3209 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
2725 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
2726 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3); 3210 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
2727 else 3211 else
2728 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0); 3212 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
@@ -2732,6 +3216,20 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2732 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr); 3216 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
2733 } 3217 }
2734 3218
3219 if (rt2x00_rt(rt2x00dev, RT5390)) {
3220 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
3221 rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
3222 rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
3223
3224 rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
3225 rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
3226 rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
3227
3228 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
3229 rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
3230 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
3231 }
3232
2735 return 0; 3233 return 0;
2736} 3234}
2737 3235
@@ -2810,10 +3308,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
2810 3308
2811 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 3309 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
2812 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); 3310 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
2813 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
2814 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); 3311 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
2815 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
2816 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
2817 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 3312 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2818 3313
2819 /* Wait for DMA, ignore error */ 3314 /* Wait for DMA, ignore error */
@@ -2823,9 +3318,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
2823 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0); 3318 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
2824 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); 3319 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
2825 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 3320 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
2826
2827 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
2828 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
2829} 3321}
2830EXPORT_SYMBOL_GPL(rt2800_disable_radio); 3322EXPORT_SYMBOL_GPL(rt2800_disable_radio);
2831 3323
@@ -2986,13 +3478,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2986 default_lna_gain); 3478 default_lna_gain);
2987 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); 3479 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
2988 3480
2989 rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
2990 if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
2991 rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
2992 if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
2993 rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
2994 rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
2995
2996 return 0; 3481 return 0;
2997} 3482}
2998EXPORT_SYMBOL_GPL(rt2800_validate_eeprom); 3483EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
@@ -3009,10 +3494,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3009 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 3494 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
3010 3495
3011 /* 3496 /*
3012 * Identify RF chipset. 3497 * Identify RF chipset by EEPROM value
3498 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
3499 * RT53xx: defined in "EEPROM_CHIP_ID" field
3013 */ 3500 */
3014 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
3015 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg); 3501 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
3502 if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
3503 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
3504 else
3505 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
3016 3506
3017 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), 3507 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
3018 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); 3508 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
@@ -3024,7 +3514,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3024 !rt2x00_rt(rt2x00dev, RT3071) && 3514 !rt2x00_rt(rt2x00dev, RT3071) &&
3025 !rt2x00_rt(rt2x00dev, RT3090) && 3515 !rt2x00_rt(rt2x00dev, RT3090) &&
3026 !rt2x00_rt(rt2x00dev, RT3390) && 3516 !rt2x00_rt(rt2x00dev, RT3390) &&
3027 !rt2x00_rt(rt2x00dev, RT3572)) { 3517 !rt2x00_rt(rt2x00dev, RT3572) &&
3518 !rt2x00_rt(rt2x00dev, RT5390)) {
3028 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 3519 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
3029 return -ENODEV; 3520 return -ENODEV;
3030 } 3521 }
@@ -3038,7 +3529,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3038 !rt2x00_rf(rt2x00dev, RF3021) && 3529 !rt2x00_rf(rt2x00dev, RF3021) &&
3039 !rt2x00_rf(rt2x00dev, RF3022) && 3530 !rt2x00_rf(rt2x00dev, RF3022) &&
3040 !rt2x00_rf(rt2x00dev, RF3052) && 3531 !rt2x00_rf(rt2x00dev, RF3052) &&
3041 !rt2x00_rf(rt2x00dev, RF3320)) { 3532 !rt2x00_rf(rt2x00dev, RF3320) &&
3533 !rt2x00_rf(rt2x00dev, RF5390)) {
3042 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3534 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
3043 return -ENODEV; 3535 return -ENODEV;
3044 } 3536 }
@@ -3046,11 +3538,35 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3046 /* 3538 /*
3047 * Identify default antenna configuration. 3539 * Identify default antenna configuration.
3048 */ 3540 */
3049 rt2x00dev->default_ant.tx = 3541 rt2x00dev->default_ant.tx_chain_num =
3050 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH); 3542 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH);
3051 rt2x00dev->default_ant.rx = 3543 rt2x00dev->default_ant.rx_chain_num =
3052 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH); 3544 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
3053 3545
3546 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
3547
3548 if (rt2x00_rt(rt2x00dev, RT3070) ||
3549 rt2x00_rt(rt2x00dev, RT3090) ||
3550 rt2x00_rt(rt2x00dev, RT3390)) {
3551 value = rt2x00_get_field16(eeprom,
3552 EEPROM_NIC_CONF1_ANT_DIVERSITY);
3553 switch (value) {
3554 case 0:
3555 case 1:
3556 case 2:
3557 rt2x00dev->default_ant.tx = ANTENNA_A;
3558 rt2x00dev->default_ant.rx = ANTENNA_A;
3559 break;
3560 case 3:
3561 rt2x00dev->default_ant.tx = ANTENNA_A;
3562 rt2x00dev->default_ant.rx = ANTENNA_B;
3563 break;
3564 }
3565 } else {
3566 rt2x00dev->default_ant.tx = ANTENNA_A;
3567 rt2x00dev->default_ant.rx = ANTENNA_A;
3568 }
3569
3054 /* 3570 /*
3055 * Read frequency offset and RF programming sequence. 3571 * Read frequency offset and RF programming sequence.
3056 */ 3572 */
@@ -3084,6 +3600,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3084 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg); 3600 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
3085#endif /* CONFIG_RT2X00_LIB_LEDS */ 3601#endif /* CONFIG_RT2X00_LIB_LEDS */
3086 3602
3603 /*
3604 * Check if support EIRP tx power limit feature.
3605 */
3606 rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
3607
3608 if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
3609 EIRP_MAX_TX_POWER_LIMIT)
3610 __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags);
3611
3087 return 0; 3612 return 0;
3088} 3613}
3089EXPORT_SYMBOL_GPL(rt2800_init_eeprom); 3614EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
@@ -3236,7 +3761,6 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3236 char *default_power1; 3761 char *default_power1;
3237 char *default_power2; 3762 char *default_power2;
3238 unsigned int i; 3763 unsigned int i;
3239 unsigned short max_power;
3240 u16 eeprom; 3764 u16 eeprom;
3241 3765
3242 /* 3766 /*
@@ -3303,7 +3827,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3303 rt2x00_rf(rt2x00dev, RF2020) || 3827 rt2x00_rf(rt2x00dev, RF2020) ||
3304 rt2x00_rf(rt2x00dev, RF3021) || 3828 rt2x00_rf(rt2x00dev, RF3021) ||
3305 rt2x00_rf(rt2x00dev, RF3022) || 3829 rt2x00_rf(rt2x00dev, RF3022) ||
3306 rt2x00_rf(rt2x00dev, RF3320)) { 3830 rt2x00_rf(rt2x00dev, RF3320) ||
3831 rt2x00_rf(rt2x00dev, RF5390)) {
3307 spec->num_channels = 14; 3832 spec->num_channels = 14;
3308 spec->channels = rf_vals_3x; 3833 spec->channels = rf_vals_3x;
3309 } else if (rt2x00_rf(rt2x00dev, RF3052)) { 3834 } else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -3361,26 +3886,21 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3361 3886
3362 spec->channels_info = info; 3887 spec->channels_info = info;
3363 3888
3364 rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
3365 max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
3366 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); 3889 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
3367 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); 3890 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
3368 3891
3369 for (i = 0; i < 14; i++) { 3892 for (i = 0; i < 14; i++) {
3370 info[i].max_power = max_power; 3893 info[i].default_power1 = default_power1[i];
3371 info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]); 3894 info[i].default_power2 = default_power2[i];
3372 info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
3373 } 3895 }
3374 3896
3375 if (spec->num_channels > 14) { 3897 if (spec->num_channels > 14) {
3376 max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
3377 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); 3898 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
3378 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); 3899 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
3379 3900
3380 for (i = 14; i < spec->num_channels; i++) { 3901 for (i = 14; i < spec->num_channels; i++) {
3381 info[i].max_power = max_power; 3902 info[i].default_power1 = default_power1[i];
3382 info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]); 3903 info[i].default_power2 = default_power2[i];
3383 info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
3384 } 3904 }
3385 } 3905 }
3386 3906
@@ -3472,7 +3992,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
3472 if (queue_idx >= 4) 3992 if (queue_idx >= 4)
3473 return 0; 3993 return 0;
3474 3994
3475 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 3995 queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
3476 3996
3477 /* Update WMM TXOP register */ 3997 /* Update WMM TXOP register */
3478 offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2))); 3998 offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -3530,7 +4050,8 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf);
3530 4050
3531int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4051int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3532 enum ieee80211_ampdu_mlme_action action, 4052 enum ieee80211_ampdu_mlme_action action,
3533 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 4053 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4054 u8 buf_size)
3534{ 4055{
3535 int ret = 0; 4056 int ret = 0;
3536 4057
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index e3c995a9dec4..0c92d86a36f4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -156,6 +156,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
156void rt2800_txdone_entry(struct queue_entry *entry, u32 status); 156void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
157 157
158void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); 158void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
159void rt2800_clear_beacon(struct queue_entry *entry);
159 160
160extern const struct rt2x00debug rt2800_rt2x00debug; 161extern const struct rt2x00debug rt2800_rt2x00debug;
161 162
@@ -198,7 +199,8 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
198u64 rt2800_get_tsf(struct ieee80211_hw *hw); 199u64 rt2800_get_tsf(struct ieee80211_hw *hw);
199int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 200int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
200 enum ieee80211_ampdu_mlme_action action, 201 enum ieee80211_ampdu_mlme_action action,
201 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 202 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
203 u8 buf_size);
202int rt2800_get_survey(struct ieee80211_hw *hw, int idx, 204int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
203 struct survey_info *survey); 205 struct survey_info *survey);
204 206
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 3b3f1e45ab3e..808073aa9dcc 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -200,11 +200,22 @@ static void rt2800pci_start_queue(struct data_queue *queue)
200 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 200 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
201 break; 201 break;
202 case QID_BEACON: 202 case QID_BEACON:
203 /*
204 * Allow beacon tasklets to be scheduled for periodic
205 * beacon updates.
206 */
207 tasklet_enable(&rt2x00dev->tbtt_tasklet);
208 tasklet_enable(&rt2x00dev->pretbtt_tasklet);
209
203 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 210 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
204 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); 211 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
205 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); 212 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
206 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 213 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
207 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 214 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
215
216 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
217 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
218 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
208 break; 219 break;
209 default: 220 default:
210 break; 221 break;
@@ -250,6 +261,16 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
250 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); 261 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
251 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 262 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
252 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 263 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
264
265 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
266 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
267 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
268
269 /*
270 * Wait for tbtt tasklets to finish.
271 */
272 tasklet_disable(&rt2x00dev->tbtt_tasklet);
273 tasklet_disable(&rt2x00dev->pretbtt_tasklet);
253 break; 274 break;
254 default: 275 default:
255 break; 276 break;
@@ -397,9 +418,9 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
397static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 418static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
398 enum dev_state state) 419 enum dev_state state)
399{ 420{
400 int mask = (state == STATE_RADIO_IRQ_ON) || 421 int mask = (state == STATE_RADIO_IRQ_ON);
401 (state == STATE_RADIO_IRQ_ON_ISR);
402 u32 reg; 422 u32 reg;
423 unsigned long flags;
403 424
404 /* 425 /*
405 * When interrupts are being enabled, the interrupt registers 426 * When interrupts are being enabled, the interrupt registers
@@ -408,8 +429,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
408 if (state == STATE_RADIO_IRQ_ON) { 429 if (state == STATE_RADIO_IRQ_ON) {
409 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 430 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
410 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg); 431 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
432
433 /*
434 * Enable tasklets. The beacon related tasklets are
435 * enabled when the beacon queue is started.
436 */
437 tasklet_enable(&rt2x00dev->txstatus_tasklet);
438 tasklet_enable(&rt2x00dev->rxdone_tasklet);
439 tasklet_enable(&rt2x00dev->autowake_tasklet);
411 } 440 }
412 441
442 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
413 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 443 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
414 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0); 444 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
415 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0); 445 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
@@ -430,6 +460,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
430 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0); 460 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
431 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0); 461 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
432 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 462 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
463 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
464
465 if (state == STATE_RADIO_IRQ_OFF) {
466 /*
467 * Ensure that all tasklets are finished before
468 * disabling the interrupts.
469 */
470 tasklet_disable(&rt2x00dev->txstatus_tasklet);
471 tasklet_disable(&rt2x00dev->rxdone_tasklet);
472 tasklet_disable(&rt2x00dev->autowake_tasklet);
473 }
433} 474}
434 475
435static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev) 476static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -452,6 +493,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
452 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); 493 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
453 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 494 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
454 495
496 if (rt2x00_rt(rt2x00dev, RT5390)) {
497 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
498 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
499 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
500 rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
501 }
502
455 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 503 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
456 504
457 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 505 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -475,39 +523,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
475 523
476static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) 524static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
477{ 525{
478 u32 reg; 526 if (rt2x00_is_soc(rt2x00dev)) {
479 527 rt2800_disable_radio(rt2x00dev);
480 rt2800_disable_radio(rt2x00dev); 528 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
481 529 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
482 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); 530 }
483
484 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
485 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
486 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
487 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
488 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
489 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
490 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
491 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
492 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
493
494 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
495 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
496} 531}
497 532
498static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, 533static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
499 enum dev_state state) 534 enum dev_state state)
500{ 535{
501 /*
502 * Always put the device to sleep (even when we intend to wakeup!)
503 * if the device is booting and wasn't asleep it will return
504 * failure when attempting to wakeup.
505 */
506 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
507
508 if (state == STATE_AWAKE) { 536 if (state == STATE_AWAKE) {
509 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); 537 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
510 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); 538 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
539 } else if (state == STATE_SLEEP) {
540 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff);
541 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff);
542 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
511 } 543 }
512 544
513 return 0; 545 return 0;
@@ -538,9 +570,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
538 rt2800pci_set_state(rt2x00dev, STATE_SLEEP); 570 rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
539 break; 571 break;
540 case STATE_RADIO_IRQ_ON: 572 case STATE_RADIO_IRQ_ON:
541 case STATE_RADIO_IRQ_ON_ISR:
542 case STATE_RADIO_IRQ_OFF: 573 case STATE_RADIO_IRQ_OFF:
543 case STATE_RADIO_IRQ_OFF_ISR:
544 rt2800pci_toggle_irq(rt2x00dev, state); 574 rt2800pci_toggle_irq(rt2x00dev, state);
545 break; 575 break;
546 case STATE_DEEP_SLEEP: 576 case STATE_DEEP_SLEEP:
@@ -696,7 +726,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
696 726
697 while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) { 727 while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
698 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE); 728 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
699 if (qid >= QID_RX) { 729 if (unlikely(qid >= QID_RX)) {
700 /* 730 /*
701 * Unknown queue, this shouldn't happen. Just drop 731 * Unknown queue, this shouldn't happen. Just drop
702 * this tx status. 732 * this tx status.
@@ -706,7 +736,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
706 break; 736 break;
707 } 737 }
708 738
709 queue = rt2x00queue_get_queue(rt2x00dev, qid); 739 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
710 if (unlikely(queue == NULL)) { 740 if (unlikely(queue == NULL)) {
711 /* 741 /*
712 * The queue is NULL, this shouldn't happen. Stop 742 * The queue is NULL, this shouldn't happen. Stop
@@ -717,7 +747,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
717 break; 747 break;
718 } 748 }
719 749
720 if (rt2x00queue_empty(queue)) { 750 if (unlikely(rt2x00queue_empty(queue))) {
721 /* 751 /*
722 * The queue is empty. Stop processing here 752 * The queue is empty. Stop processing here
723 * and drop the tx status. 753 * and drop the tx status.
@@ -732,45 +762,59 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
732 } 762 }
733} 763}
734 764
735static void rt2800pci_txstatus_tasklet(unsigned long data) 765static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
766 struct rt2x00_field32 irq_field)
736{ 767{
737 rt2800pci_txdone((struct rt2x00_dev *)data); 768 u32 reg;
738}
739
740static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
741{
742 struct rt2x00_dev *rt2x00dev = dev_instance;
743 u32 reg = rt2x00dev->irqvalue[0];
744 769
745 /* 770 /*
746 * 1 - Pre TBTT interrupt. 771 * Enable a single interrupt. The interrupt mask register
772 * access needs locking.
747 */ 773 */
748 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT)) 774 spin_lock_irq(&rt2x00dev->irqmask_lock);
749 rt2x00lib_pretbtt(rt2x00dev); 775 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
776 rt2x00_set_field32(&reg, irq_field, 1);
777 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
778 spin_unlock_irq(&rt2x00dev->irqmask_lock);
779}
750 780
751 /* 781static void rt2800pci_txstatus_tasklet(unsigned long data)
752 * 2 - Beacondone interrupt. 782{
753 */ 783 rt2800pci_txdone((struct rt2x00_dev *)data);
754 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
755 rt2x00lib_beacondone(rt2x00dev);
756 784
757 /* 785 /*
758 * 3 - Rx ring done interrupt. 786 * No need to enable the tx status interrupt here as we always
787 * leave it enabled to minimize the possibility of a tx status
788 * register overflow. See comment in interrupt handler.
759 */ 789 */
760 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) 790}
761 rt2x00pci_rxdone(rt2x00dev);
762 791
763 /* 792static void rt2800pci_pretbtt_tasklet(unsigned long data)
764 * 4 - Auto wakeup interrupt. 793{
765 */ 794 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
766 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) 795 rt2x00lib_pretbtt(rt2x00dev);
767 rt2800pci_wakeup(rt2x00dev); 796 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
797}
768 798
769 /* Enable interrupts again. */ 799static void rt2800pci_tbtt_tasklet(unsigned long data)
770 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 800{
771 STATE_RADIO_IRQ_ON_ISR); 801 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
802 rt2x00lib_beacondone(rt2x00dev);
803 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
804}
772 805
773 return IRQ_HANDLED; 806static void rt2800pci_rxdone_tasklet(unsigned long data)
807{
808 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
809 rt2x00pci_rxdone(rt2x00dev);
810 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
811}
812
813static void rt2800pci_autowake_tasklet(unsigned long data)
814{
815 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
816 rt2800pci_wakeup(rt2x00dev);
817 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
774} 818}
775 819
776static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) 820static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
@@ -791,7 +835,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
791 * 835 *
792 * Furthermore we don't disable the TX_FIFO_STATUS 836 * Furthermore we don't disable the TX_FIFO_STATUS
793 * interrupt here but leave it enabled so that the TX_STA_FIFO 837 * interrupt here but leave it enabled so that the TX_STA_FIFO
794 * can also be read while the interrupt thread gets executed. 838 * can also be read while the tx status tasklet gets executed.
795 * 839 *
796 * Since we have only one producer and one consumer we don't 840 * Since we have only one producer and one consumer we don't
797 * need to lock the kfifo. 841 * need to lock the kfifo.
@@ -816,8 +860,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
816static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) 860static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
817{ 861{
818 struct rt2x00_dev *rt2x00dev = dev_instance; 862 struct rt2x00_dev *rt2x00dev = dev_instance;
819 u32 reg; 863 u32 reg, mask;
820 irqreturn_t ret = IRQ_HANDLED;
821 864
822 /* Read status and ACK all interrupts */ 865 /* Read status and ACK all interrupts */
823 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 866 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@@ -829,38 +872,44 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
829 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 872 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
830 return IRQ_HANDLED; 873 return IRQ_HANDLED;
831 874
832 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) 875 /*
833 rt2800pci_txstatus_interrupt(rt2x00dev); 876 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
877 * for interrupts and interrupt masks we can just use the value of
878 * INT_SOURCE_CSR to create the interrupt mask.
879 */
880 mask = ~reg;
834 881
835 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) || 882 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
836 rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) || 883 rt2800pci_txstatus_interrupt(rt2x00dev);
837 rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) ||
838 rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) {
839 /* 884 /*
840 * All other interrupts are handled in the interrupt thread. 885 * Never disable the TX_FIFO_STATUS interrupt.
841 * Store irqvalue for use in the interrupt thread.
842 */ 886 */
843 rt2x00dev->irqvalue[0] = reg; 887 rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
888 }
844 889
845 /* 890 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
846 * Disable interrupts, will be enabled again in the 891 tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
847 * interrupt thread.
848 */
849 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
850 STATE_RADIO_IRQ_OFF_ISR);
851 892
852 /* 893 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
853 * Leave the TX_FIFO_STATUS interrupt enabled to not lose any 894 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
854 * tx status reports.
855 */
856 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
857 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
858 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
859 895
860 ret = IRQ_WAKE_THREAD; 896 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
861 } 897 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
862 898
863 return ret; 899 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
900 tasklet_schedule(&rt2x00dev->autowake_tasklet);
901
902 /*
903 * Disable all interrupts for which a tasklet was scheduled right now,
904 * the tasklet will reenable the appropriate interrupts.
905 */
906 spin_lock(&rt2x00dev->irqmask_lock);
907 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
908 reg &= mask;
909 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
910 spin_unlock(&rt2x00dev->irqmask_lock);
911
912 return IRQ_HANDLED;
864} 913}
865 914
866/* 915/*
@@ -928,6 +977,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
928 if (!modparam_nohwcrypt) 977 if (!modparam_nohwcrypt)
929 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 978 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
930 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 979 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
980 __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
931 981
932 /* 982 /*
933 * Set the rssi offset. 983 * Set the rssi offset.
@@ -975,8 +1025,11 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
975 1025
976static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { 1026static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
977 .irq_handler = rt2800pci_interrupt, 1027 .irq_handler = rt2800pci_interrupt,
978 .irq_handler_thread = rt2800pci_interrupt_thread, 1028 .txstatus_tasklet = rt2800pci_txstatus_tasklet,
979 .txstatus_tasklet = rt2800pci_txstatus_tasklet, 1029 .pretbtt_tasklet = rt2800pci_pretbtt_tasklet,
1030 .tbtt_tasklet = rt2800pci_tbtt_tasklet,
1031 .rxdone_tasklet = rt2800pci_rxdone_tasklet,
1032 .autowake_tasklet = rt2800pci_autowake_tasklet,
980 .probe_hw = rt2800pci_probe_hw, 1033 .probe_hw = rt2800pci_probe_hw,
981 .get_firmware_name = rt2800pci_get_firmware_name, 1034 .get_firmware_name = rt2800pci_get_firmware_name,
982 .check_firmware = rt2800_check_firmware, 1035 .check_firmware = rt2800_check_firmware,
@@ -996,6 +1049,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
996 .write_tx_desc = rt2800pci_write_tx_desc, 1049 .write_tx_desc = rt2800pci_write_tx_desc,
997 .write_tx_data = rt2800_write_tx_data, 1050 .write_tx_data = rt2800_write_tx_data,
998 .write_beacon = rt2800_write_beacon, 1051 .write_beacon = rt2800_write_beacon,
1052 .clear_beacon = rt2800_clear_beacon,
999 .fill_rxdone = rt2800pci_fill_rxdone, 1053 .fill_rxdone = rt2800pci_fill_rxdone,
1000 .config_shared_key = rt2800_config_shared_key, 1054 .config_shared_key = rt2800_config_shared_key,
1001 .config_pairwise_key = rt2800_config_pairwise_key, 1055 .config_pairwise_key = rt2800_config_pairwise_key,
@@ -1079,6 +1133,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1079 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1133 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
1080 { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1134 { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
1081#endif 1135#endif
1136#ifdef CONFIG_RT2800PCI_RT53XX
1137 { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
1138#endif
1082 { 0, } 1139 { 0, }
1083}; 1140};
1084#endif /* CONFIG_PCI */ 1141#endif /* CONFIG_PCI */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 197a36c05fda..f1a92144996f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -253,9 +253,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
253 rt2800usb_set_state(rt2x00dev, STATE_SLEEP); 253 rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
254 break; 254 break;
255 case STATE_RADIO_IRQ_ON: 255 case STATE_RADIO_IRQ_ON:
256 case STATE_RADIO_IRQ_ON_ISR:
257 case STATE_RADIO_IRQ_OFF: 256 case STATE_RADIO_IRQ_OFF:
258 case STATE_RADIO_IRQ_OFF_ISR:
259 /* No support, but no error either */ 257 /* No support, but no error either */
260 break; 258 break;
261 case STATE_DEEP_SLEEP: 259 case STATE_DEEP_SLEEP:
@@ -567,6 +565,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
567 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 565 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
568 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 566 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
569 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); 567 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
568 __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
570 569
571 /* 570 /*
572 * Set the rssi offset. 571 * Set the rssi offset.
@@ -639,6 +638,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
639 .write_tx_desc = rt2800usb_write_tx_desc, 638 .write_tx_desc = rt2800usb_write_tx_desc,
640 .write_tx_data = rt2800usb_write_tx_data, 639 .write_tx_data = rt2800usb_write_tx_data,
641 .write_beacon = rt2800_write_beacon, 640 .write_beacon = rt2800_write_beacon,
641 .clear_beacon = rt2800_clear_beacon,
642 .get_tx_data_len = rt2800usb_get_tx_data_len, 642 .get_tx_data_len = rt2800usb_get_tx_data_len,
643 .fill_rxdone = rt2800usb_fill_rxdone, 643 .fill_rxdone = rt2800usb_fill_rxdone,
644 .config_shared_key = rt2800_config_shared_key, 644 .config_shared_key = rt2800_config_shared_key,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 84aaf393da43..a3940d7300a4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -189,6 +189,7 @@ struct rt2x00_chip {
189#define RT3572 0x3572 189#define RT3572 0x3572
190#define RT3593 0x3593 /* PCIe */ 190#define RT3593 0x3593 /* PCIe */
191#define RT3883 0x3883 /* WSOC */ 191#define RT3883 0x3883 /* WSOC */
192#define RT5390 0x5390 /* 2.4GHz */
192 193
193 u16 rf; 194 u16 rf;
194 u16 rev; 195 u16 rev;
@@ -225,6 +226,8 @@ struct channel_info {
225struct antenna_setup { 226struct antenna_setup {
226 enum antenna rx; 227 enum antenna rx;
227 enum antenna tx; 228 enum antenna tx;
229 u8 rx_chain_num;
230 u8 tx_chain_num;
228}; 231};
229 232
230/* 233/*
@@ -368,6 +371,7 @@ struct rt2x00_intf {
368 * dedicated beacon entry. 371 * dedicated beacon entry.
369 */ 372 */
370 struct queue_entry *beacon; 373 struct queue_entry *beacon;
374 bool enable_beacon;
371 375
372 /* 376 /*
373 * Actions that needed rescheduling. 377 * Actions that needed rescheduling.
@@ -463,7 +467,6 @@ struct rt2x00lib_crypto {
463 const u8 *address; 467 const u8 *address;
464 468
465 u32 bssidx; 469 u32 bssidx;
466 u32 aid;
467 470
468 u8 key[16]; 471 u8 key[16];
469 u8 tx_mic[8]; 472 u8 tx_mic[8];
@@ -511,14 +514,13 @@ struct rt2x00lib_ops {
511 irq_handler_t irq_handler; 514 irq_handler_t irq_handler;
512 515
513 /* 516 /*
514 * Threaded Interrupt handlers.
515 */
516 irq_handler_t irq_handler_thread;
517
518 /*
519 * TX status tasklet handler. 517 * TX status tasklet handler.
520 */ 518 */
521 void (*txstatus_tasklet) (unsigned long data); 519 void (*txstatus_tasklet) (unsigned long data);
520 void (*pretbtt_tasklet) (unsigned long data);
521 void (*tbtt_tasklet) (unsigned long data);
522 void (*rxdone_tasklet) (unsigned long data);
523 void (*autowake_tasklet) (unsigned long data);
522 524
523 /* 525 /*
524 * Device init handlers. 526 * Device init handlers.
@@ -573,6 +575,7 @@ struct rt2x00lib_ops {
573 struct txentry_desc *txdesc); 575 struct txentry_desc *txdesc);
574 void (*write_beacon) (struct queue_entry *entry, 576 void (*write_beacon) (struct queue_entry *entry,
575 struct txentry_desc *txdesc); 577 struct txentry_desc *txdesc);
578 void (*clear_beacon) (struct queue_entry *entry);
576 int (*get_tx_data_len) (struct queue_entry *entry); 579 int (*get_tx_data_len) (struct queue_entry *entry);
577 580
578 /* 581 /*
@@ -658,12 +661,15 @@ enum rt2x00_flags {
658 DRIVER_REQUIRE_L2PAD, 661 DRIVER_REQUIRE_L2PAD,
659 DRIVER_REQUIRE_TXSTATUS_FIFO, 662 DRIVER_REQUIRE_TXSTATUS_FIFO,
660 DRIVER_REQUIRE_TASKLET_CONTEXT, 663 DRIVER_REQUIRE_TASKLET_CONTEXT,
664 DRIVER_REQUIRE_SW_SEQNO,
665 DRIVER_REQUIRE_HT_TX_DESC,
661 666
662 /* 667 /*
663 * Driver features 668 * Driver features
664 */ 669 */
665 CONFIG_SUPPORT_HW_BUTTON, 670 CONFIG_SUPPORT_HW_BUTTON,
666 CONFIG_SUPPORT_HW_CRYPTO, 671 CONFIG_SUPPORT_HW_CRYPTO,
672 CONFIG_SUPPORT_POWER_LIMIT,
667 DRIVER_SUPPORT_CONTROL_FILTERS, 673 DRIVER_SUPPORT_CONTROL_FILTERS,
668 DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, 674 DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
669 DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, 675 DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
@@ -788,10 +794,12 @@ struct rt2x00_dev {
788 * - Open ap interface count. 794 * - Open ap interface count.
789 * - Open sta interface count. 795 * - Open sta interface count.
790 * - Association count. 796 * - Association count.
797 * - Beaconing enabled count.
791 */ 798 */
792 unsigned int intf_ap_count; 799 unsigned int intf_ap_count;
793 unsigned int intf_sta_count; 800 unsigned int intf_sta_count;
794 unsigned int intf_associated; 801 unsigned int intf_associated;
802 unsigned int intf_beaconing;
795 803
796 /* 804 /*
797 * Link quality 805 * Link quality
@@ -857,6 +865,13 @@ struct rt2x00_dev {
857 */ 865 */
858 struct ieee80211_low_level_stats low_level_stats; 866 struct ieee80211_low_level_stats low_level_stats;
859 867
868 /**
869 * Work queue for all work which should not be placed
870 * on the mac80211 workqueue (because of dependencies
871 * between various work structures).
872 */
873 struct workqueue_struct *workqueue;
874
860 /* 875 /*
861 * Scheduled work. 876 * Scheduled work.
862 * NOTE: intf_work will use ieee80211_iterate_active_interfaces() 877 * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
@@ -872,14 +887,13 @@ struct rt2x00_dev {
872 struct work_struct txdone_work; 887 struct work_struct txdone_work;
873 888
874 /* 889 /*
875 * Data queue arrays for RX, TX and Beacon. 890 * Data queue arrays for RX, TX, Beacon and ATIM.
876 * The Beacon array also contains the Atim queue
877 * if that is supported by the device.
878 */ 891 */
879 unsigned int data_queues; 892 unsigned int data_queues;
880 struct data_queue *rx; 893 struct data_queue *rx;
881 struct data_queue *tx; 894 struct data_queue *tx;
882 struct data_queue *bcn; 895 struct data_queue *bcn;
896 struct data_queue *atim;
883 897
884 /* 898 /*
885 * Firmware image. 899 * Firmware image.
@@ -887,12 +901,6 @@ struct rt2x00_dev {
887 const struct firmware *fw; 901 const struct firmware *fw;
888 902
889 /* 903 /*
890 * Interrupt values, stored between interrupt service routine
891 * and interrupt thread routine.
892 */
893 u32 irqvalue[2];
894
895 /*
896 * FIFO for storing tx status reports between isr and tasklet. 904 * FIFO for storing tx status reports between isr and tasklet.
897 */ 905 */
898 DECLARE_KFIFO_PTR(txstatus_fifo, u32); 906 DECLARE_KFIFO_PTR(txstatus_fifo, u32);
@@ -901,6 +909,15 @@ struct rt2x00_dev {
901 * Tasklet for processing tx status reports (rt2800pci). 909 * Tasklet for processing tx status reports (rt2800pci).
902 */ 910 */
903 struct tasklet_struct txstatus_tasklet; 911 struct tasklet_struct txstatus_tasklet;
912 struct tasklet_struct pretbtt_tasklet;
913 struct tasklet_struct tbtt_tasklet;
914 struct tasklet_struct rxdone_tasklet;
915 struct tasklet_struct autowake_tasklet;
916
917 /*
918 * Protect the interrupt mask register.
919 */
920 spinlock_t irqmask_lock;
904}; 921};
905 922
906/* 923/*
@@ -1046,12 +1063,24 @@ void rt2x00queue_map_txskb(struct queue_entry *entry);
1046void rt2x00queue_unmap_skb(struct queue_entry *entry); 1063void rt2x00queue_unmap_skb(struct queue_entry *entry);
1047 1064
1048/** 1065/**
1049 * rt2x00queue_get_queue - Convert queue index to queue pointer 1066 * rt2x00queue_get_tx_queue - Convert tx queue index to queue pointer
1050 * @rt2x00dev: Pointer to &struct rt2x00_dev. 1067 * @rt2x00dev: Pointer to &struct rt2x00_dev.
1051 * @queue: rt2x00 queue index (see &enum data_queue_qid). 1068 * @queue: rt2x00 queue index (see &enum data_queue_qid).
1069 *
1070 * Returns NULL for non tx queues.
1052 */ 1071 */
1053struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 1072static inline struct data_queue *
1054 const enum data_queue_qid queue); 1073rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev,
1074 const enum data_queue_qid queue)
1075{
1076 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
1077 return &rt2x00dev->tx[queue];
1078
1079 if (queue == QID_ATIM)
1080 return rt2x00dev->atim;
1081
1082 return NULL;
1083}
1055 1084
1056/** 1085/**
1057 * rt2x00queue_get_entry - Get queue entry where the given index points to. 1086 * rt2x00queue_get_entry - Get queue entry where the given index points to.
@@ -1168,7 +1197,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry);
1168/* 1197/*
1169 * mac80211 handlers. 1198 * mac80211 handlers.
1170 */ 1199 */
1171int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 1200void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
1172int rt2x00mac_start(struct ieee80211_hw *hw); 1201int rt2x00mac_start(struct ieee80211_hw *hw);
1173void rt2x00mac_stop(struct ieee80211_hw *hw); 1202void rt2x00mac_stop(struct ieee80211_hw *hw);
1174int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1203int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9597a03242cc..9de9dbe94399 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -121,7 +121,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
121 return; 121 return;
122 122
123 if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags)) 123 if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
124 rt2x00queue_update_beacon(rt2x00dev, vif, true); 124 rt2x00queue_update_beacon(rt2x00dev, vif);
125} 125}
126 126
127static void rt2x00lib_intf_scheduled(struct work_struct *work) 127static void rt2x00lib_intf_scheduled(struct work_struct *work)
@@ -174,7 +174,13 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
174 vif->type != NL80211_IFTYPE_WDS) 174 vif->type != NL80211_IFTYPE_WDS)
175 return; 175 return;
176 176
177 rt2x00queue_update_beacon(rt2x00dev, vif, true); 177 /*
178 * Update the beacon without locking. This is safe on PCI devices
179 * as they only update the beacon periodically here. This should
180 * never be called for USB devices.
181 */
182 WARN_ON(rt2x00_is_usb(rt2x00dev));
183 rt2x00queue_update_beacon_locked(rt2x00dev, vif);
178} 184}
179 185
180void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) 186void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -183,9 +189,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
183 return; 189 return;
184 190
185 /* send buffered bc/mc frames out for every bssid */ 191 /* send buffered bc/mc frames out for every bssid */
186 ieee80211_iterate_active_interfaces(rt2x00dev->hw, 192 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
187 rt2x00lib_bc_buffer_iter, 193 rt2x00lib_bc_buffer_iter,
188 rt2x00dev); 194 rt2x00dev);
189 /* 195 /*
190 * Devices with pre tbtt interrupt don't need to update the beacon 196 * Devices with pre tbtt interrupt don't need to update the beacon
191 * here as they will fetch the next beacon directly prior to 197 * here as they will fetch the next beacon directly prior to
@@ -195,9 +201,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
195 return; 201 return;
196 202
197 /* fetch next beacon */ 203 /* fetch next beacon */
198 ieee80211_iterate_active_interfaces(rt2x00dev->hw, 204 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
199 rt2x00lib_beaconupdate_iter, 205 rt2x00lib_beaconupdate_iter,
200 rt2x00dev); 206 rt2x00dev);
201} 207}
202EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); 208EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
203 209
@@ -207,9 +213,9 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
207 return; 213 return;
208 214
209 /* fetch next beacon */ 215 /* fetch next beacon */
210 ieee80211_iterate_active_interfaces(rt2x00dev->hw, 216 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
211 rt2x00lib_beaconupdate_iter, 217 rt2x00lib_beaconupdate_iter,
212 rt2x00dev); 218 rt2x00dev);
213} 219}
214EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); 220EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
215 221
@@ -649,7 +655,10 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
649 const int channel, const int tx_power, 655 const int channel, const int tx_power,
650 const int value) 656 const int value)
651{ 657{
652 entry->center_freq = ieee80211_channel_to_frequency(channel); 658 /* XXX: this assumption about the band is wrong for 802.11j */
659 entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
660 entry->center_freq = ieee80211_channel_to_frequency(channel,
661 entry->band);
653 entry->hw_value = value; 662 entry->hw_value = value;
654 entry->max_power = tx_power; 663 entry->max_power = tx_power;
655 entry->max_antenna_gain = 0xff; 664 entry->max_antenna_gain = 0xff;
@@ -812,15 +821,29 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
812 GFP_KERNEL); 821 GFP_KERNEL);
813 if (status) 822 if (status)
814 return status; 823 return status;
824 }
815 825
816 /* tasklet for processing the tx status reports. */ 826 /*
817 if (rt2x00dev->ops->lib->txstatus_tasklet) 827 * Initialize tasklets if used by the driver. Tasklets are
818 tasklet_init(&rt2x00dev->txstatus_tasklet, 828 * disabled until the interrupts are turned on. The driver
819 rt2x00dev->ops->lib->txstatus_tasklet, 829 * has to handle that.
820 (unsigned long)rt2x00dev); 830 */
821 831#define RT2X00_TASKLET_INIT(taskletname) \
832 if (rt2x00dev->ops->lib->taskletname) { \
833 tasklet_init(&rt2x00dev->taskletname, \
834 rt2x00dev->ops->lib->taskletname, \
835 (unsigned long)rt2x00dev); \
836 tasklet_disable(&rt2x00dev->taskletname); \
822 } 837 }
823 838
839 RT2X00_TASKLET_INIT(txstatus_tasklet);
840 RT2X00_TASKLET_INIT(pretbtt_tasklet);
841 RT2X00_TASKLET_INIT(tbtt_tasklet);
842 RT2X00_TASKLET_INIT(rxdone_tasklet);
843 RT2X00_TASKLET_INIT(autowake_tasklet);
844
845#undef RT2X00_TASKLET_INIT
846
824 /* 847 /*
825 * Register HW. 848 * Register HW.
826 */ 849 */
@@ -949,6 +972,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
949{ 972{
950 int retval = -ENOMEM; 973 int retval = -ENOMEM;
951 974
975 spin_lock_init(&rt2x00dev->irqmask_lock);
952 mutex_init(&rt2x00dev->csr_mutex); 976 mutex_init(&rt2x00dev->csr_mutex);
953 977
954 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); 978 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -973,8 +997,15 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
973 BIT(NL80211_IFTYPE_WDS); 997 BIT(NL80211_IFTYPE_WDS);
974 998
975 /* 999 /*
976 * Initialize configuration work. 1000 * Initialize work.
977 */ 1001 */
1002 rt2x00dev->workqueue =
1003 alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
1004 if (!rt2x00dev->workqueue) {
1005 retval = -ENOMEM;
1006 goto exit;
1007 }
1008
978 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 1009 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
979 1010
980 /* 1011 /*
@@ -1033,6 +1064,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1033 cancel_work_sync(&rt2x00dev->intf_work); 1064 cancel_work_sync(&rt2x00dev->intf_work);
1034 cancel_work_sync(&rt2x00dev->rxdone_work); 1065 cancel_work_sync(&rt2x00dev->rxdone_work);
1035 cancel_work_sync(&rt2x00dev->txdone_work); 1066 cancel_work_sync(&rt2x00dev->txdone_work);
1067 destroy_workqueue(rt2x00dev->workqueue);
1036 1068
1037 /* 1069 /*
1038 * Free the tx status fifo. 1070 * Free the tx status fifo.
@@ -1043,6 +1075,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1043 * Kill the tx status tasklet. 1075 * Kill the tx status tasklet.
1044 */ 1076 */
1045 tasklet_kill(&rt2x00dev->txstatus_tasklet); 1077 tasklet_kill(&rt2x00dev->txstatus_tasklet);
1078 tasklet_kill(&rt2x00dev->pretbtt_tasklet);
1079 tasklet_kill(&rt2x00dev->tbtt_tasklet);
1080 tasklet_kill(&rt2x00dev->rxdone_tasklet);
1081 tasklet_kill(&rt2x00dev->autowake_tasklet);
1046 1082
1047 /* 1083 /*
1048 * Uninitialize device. 1084 * Uninitialize device.
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index b7ad46ecaa1d..ae1219dffaae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -38,12 +38,12 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
38 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 38 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
39 39
40 if (tx_info->control.sta) 40 if (tx_info->control.sta)
41 txdesc->mpdu_density = 41 txdesc->u.ht.mpdu_density =
42 tx_info->control.sta->ht_cap.ampdu_density; 42 tx_info->control.sta->ht_cap.ampdu_density;
43 43
44 txdesc->ba_size = 7; /* FIXME: What value is needed? */ 44 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
45 45
46 txdesc->stbc = 46 txdesc->u.ht.stbc =
47 (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT; 47 (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
48 48
49 /* 49 /*
@@ -51,25 +51,24 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
51 * mcs rate to be used 51 * mcs rate to be used
52 */ 52 */
53 if (txrate->flags & IEEE80211_TX_RC_MCS) { 53 if (txrate->flags & IEEE80211_TX_RC_MCS) {
54 txdesc->mcs = txrate->idx; 54 txdesc->u.ht.mcs = txrate->idx;
55 55
56 /* 56 /*
57 * MIMO PS should be set to 1 for STA's using dynamic SM PS 57 * MIMO PS should be set to 1 for STA's using dynamic SM PS
58 * when using more then one tx stream (>MCS7). 58 * when using more then one tx stream (>MCS7).
59 */ 59 */
60 if (tx_info->control.sta && txdesc->mcs > 7 && 60 if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
61 ((tx_info->control.sta->ht_cap.cap & 61 ((tx_info->control.sta->ht_cap.cap &
62 IEEE80211_HT_CAP_SM_PS) >> 62 IEEE80211_HT_CAP_SM_PS) >>
63 IEEE80211_HT_CAP_SM_PS_SHIFT) == 63 IEEE80211_HT_CAP_SM_PS_SHIFT) ==
64 WLAN_HT_CAP_SM_PS_DYNAMIC) 64 WLAN_HT_CAP_SM_PS_DYNAMIC)
65 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); 65 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
66 } else { 66 } else {
67 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); 67 txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
68 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 68 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
69 txdesc->mcs |= 0x08; 69 txdesc->u.ht.mcs |= 0x08;
70 } 70 }
71 71
72
73 /* 72 /*
74 * This frame is eligible for an AMPDU, however, don't aggregate 73 * This frame is eligible for an AMPDU, however, don't aggregate
75 * frames that are intended to probe a specific tx rate. 74 * frames that are intended to probe a specific tx rate.
@@ -79,14 +78,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
79 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); 78 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
80 79
81 /* 80 /*
82 * Determine HT Mix/Greenfield rate mode
83 */
84 if (txrate->flags & IEEE80211_TX_RC_MCS)
85 txdesc->rate_mode = RATE_MODE_HT_MIX;
86 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
87 txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
88
89 /*
90 * Set 40Mhz mode if necessary (for legacy rates this will 81 * Set 40Mhz mode if necessary (for legacy rates this will
91 * duplicate the frame to both channels). 82 * duplicate the frame to both channels).
92 */ 83 */
@@ -106,11 +97,11 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
106 * for frames not transmitted with TXOP_HTTXOP 97 * for frames not transmitted with TXOP_HTTXOP
107 */ 98 */
108 if (ieee80211_is_mgmt(hdr->frame_control)) 99 if (ieee80211_is_mgmt(hdr->frame_control))
109 txdesc->txop = TXOP_BACKOFF; 100 txdesc->u.ht.txop = TXOP_BACKOFF;
110 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) 101 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
111 txdesc->txop = TXOP_SIFS; 102 txdesc->u.ht.txop = TXOP_SIFS;
112 else 103 else
113 txdesc->txop = TXOP_HTTXOP; 104 txdesc->u.ht.txop = TXOP_HTTXOP;
114} 105}
115 106
116u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev, 107u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index a105c500627b..2d94cbaf5f4a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -157,14 +157,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
157 bool local); 157 bool local);
158 158
159/** 159/**
160 * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware 160 * rt2x00queue_update_beacon - Send new beacon from mac80211
161 * to hardware. Handles locking by itself (mutex).
161 * @rt2x00dev: Pointer to &struct rt2x00_dev. 162 * @rt2x00dev: Pointer to &struct rt2x00_dev.
162 * @vif: Interface for which the beacon should be updated. 163 * @vif: Interface for which the beacon should be updated.
163 * @enable_beacon: Enable beaconing
164 */ 164 */
165int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, 165int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
166 struct ieee80211_vif *vif, 166 struct ieee80211_vif *vif);
167 const bool enable_beacon); 167
168/**
169 * rt2x00queue_update_beacon_locked - Send new beacon from mac80211
170 * to hardware. Caller needs to ensure locking.
171 * @rt2x00dev: Pointer to &struct rt2x00_dev.
172 * @vif: Interface for which the beacon should be updated.
173 */
174int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
175 struct ieee80211_vif *vif);
176
177/**
178 * rt2x00queue_clear_beacon - Clear beacon in hardware
179 * @rt2x00dev: Pointer to &struct rt2x00_dev.
180 * @vif: Interface for which the beacon should be updated.
181 */
182int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
183 struct ieee80211_vif *vif);
168 184
169/** 185/**
170 * rt2x00queue_index_inc - Index incrementation function 186 * rt2x00queue_index_inc - Index incrementation function
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index bfda60eaf4ef..c975b0a12e95 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -417,7 +417,8 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
417 !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags)) 417 !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
418 return; 418 return;
419 419
420 schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL); 420 ieee80211_queue_delayed_work(rt2x00dev->hw,
421 &link->watchdog_work, WATCHDOG_INTERVAL);
421} 422}
422 423
423void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev) 424void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -441,7 +442,9 @@ static void rt2x00link_watchdog(struct work_struct *work)
441 rt2x00dev->ops->lib->watchdog(rt2x00dev); 442 rt2x00dev->ops->lib->watchdog(rt2x00dev);
442 443
443 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 444 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
444 schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL); 445 ieee80211_queue_delayed_work(rt2x00dev->hw,
446 &link->watchdog_work,
447 WATCHDOG_INTERVAL);
445} 448}
446 449
447void rt2x00link_register(struct rt2x00_dev *rt2x00dev) 450void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f3da051df39e..661c6baad2b9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,7 +99,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
99 return retval; 99 return retval;
100} 100}
101 101
102int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 102void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
103{ 103{
104 struct rt2x00_dev *rt2x00dev = hw->priv; 104 struct rt2x00_dev *rt2x00dev = hw->priv;
105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -116,13 +116,13 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
116 goto exit_fail; 116 goto exit_fail;
117 117
118 /* 118 /*
119 * Determine which queue to put packet on. 119 * Use the ATIM queue if appropriate and present.
120 */ 120 */
121 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && 121 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
122 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) 122 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
123 queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM); 123 qid = QID_ATIM;
124 else 124
125 queue = rt2x00queue_get_queue(rt2x00dev, qid); 125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
126 if (unlikely(!queue)) { 126 if (unlikely(!queue)) {
127 ERROR(rt2x00dev, 127 ERROR(rt2x00dev,
128 "Attempt to send packet over invalid queue %d.\n" 128 "Attempt to send packet over invalid queue %d.\n"
@@ -139,9 +139,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
139 * either RTS or CTS-to-self frame and handles everything 139 * either RTS or CTS-to-self frame and handles everything
140 * inside the hardware. 140 * inside the hardware.
141 */ 141 */
142 if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | 142 if (!rt2x00dev->ops->hw->set_rts_threshold &&
143 IEEE80211_TX_RC_USE_CTS_PROTECT)) && 143 (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
144 !rt2x00dev->ops->hw->set_rts_threshold) { 144 IEEE80211_TX_RC_USE_CTS_PROTECT))) {
145 if (rt2x00queue_available(queue) <= 1) 145 if (rt2x00queue_available(queue) <= 1)
146 goto exit_fail; 146 goto exit_fail;
147 147
@@ -149,18 +149,17 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
149 goto exit_fail; 149 goto exit_fail;
150 } 150 }
151 151
152 if (rt2x00queue_write_tx_frame(queue, skb, false)) 152 if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
153 goto exit_fail; 153 goto exit_fail;
154 154
155 if (rt2x00queue_threshold(queue)) 155 if (rt2x00queue_threshold(queue))
156 rt2x00queue_pause_queue(queue); 156 rt2x00queue_pause_queue(queue);
157 157
158 return NETDEV_TX_OK; 158 return;
159 159
160 exit_fail: 160 exit_fail:
161 ieee80211_stop_queue(rt2x00dev->hw, qid); 161 ieee80211_stop_queue(rt2x00dev->hw, qid);
162 dev_kfree_skb_any(skb); 162 dev_kfree_skb_any(skb);
163 return NETDEV_TX_OK;
164} 163}
165EXPORT_SYMBOL_GPL(rt2x00mac_tx); 164EXPORT_SYMBOL_GPL(rt2x00mac_tx);
166 165
@@ -191,7 +190,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
191{ 190{
192 struct rt2x00_dev *rt2x00dev = hw->priv; 191 struct rt2x00_dev *rt2x00dev = hw->priv;
193 struct rt2x00_intf *intf = vif_to_intf(vif); 192 struct rt2x00_intf *intf = vif_to_intf(vif);
194 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); 193 struct data_queue *queue = rt2x00dev->bcn;
195 struct queue_entry *entry = NULL; 194 struct queue_entry *entry = NULL;
196 unsigned int i; 195 unsigned int i;
197 196
@@ -519,11 +518,9 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
519 518
520 crypto.cmd = cmd; 519 crypto.cmd = cmd;
521 520
522 if (sta) { 521 if (sta)
523 /* some drivers need the AID */
524 crypto.aid = sta->aid;
525 crypto.address = sta->addr; 522 crypto.address = sta->addr;
526 } else 523 else
527 crypto.address = bcast_addr; 524 crypto.address = bcast_addr;
528 525
529 if (crypto.cipher == CIPHER_TKIP) 526 if (crypto.cipher == CIPHER_TKIP)
@@ -617,11 +614,47 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
617 bss_conf->bssid); 614 bss_conf->bssid);
618 615
619 /* 616 /*
620 * Update the beacon. 617 * Update the beacon. This is only required on USB devices. PCI
618 * devices fetch beacons periodically.
621 */ 619 */
622 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) 620 if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
623 rt2x00queue_update_beacon(rt2x00dev, vif, 621 rt2x00queue_update_beacon(rt2x00dev, vif);
624 bss_conf->enable_beacon); 622
623 /*
624 * Start/stop beaconing.
625 */
626 if (changes & BSS_CHANGED_BEACON_ENABLED) {
627 if (!bss_conf->enable_beacon && intf->enable_beacon) {
628 rt2x00queue_clear_beacon(rt2x00dev, vif);
629 rt2x00dev->intf_beaconing--;
630 intf->enable_beacon = false;
631
632 if (rt2x00dev->intf_beaconing == 0) {
633 /*
634 * Last beaconing interface disabled
635 * -> stop beacon queue.
636 */
637 mutex_lock(&intf->beacon_skb_mutex);
638 rt2x00queue_stop_queue(rt2x00dev->bcn);
639 mutex_unlock(&intf->beacon_skb_mutex);
640 }
641
642
643 } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
644 rt2x00dev->intf_beaconing++;
645 intf->enable_beacon = true;
646
647 if (rt2x00dev->intf_beaconing == 1) {
648 /*
649 * First beaconing interface enabled
650 * -> start beacon queue.
651 */
652 mutex_lock(&intf->beacon_skb_mutex);
653 rt2x00queue_start_queue(rt2x00dev->bcn);
654 mutex_unlock(&intf->beacon_skb_mutex);
655 }
656 }
657 }
625 658
626 /* 659 /*
627 * When the association status has changed we must reset the link 660 * When the association status has changed we must reset the link
@@ -657,7 +690,7 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
657 struct rt2x00_dev *rt2x00dev = hw->priv; 690 struct rt2x00_dev *rt2x00dev = hw->priv;
658 struct data_queue *queue; 691 struct data_queue *queue;
659 692
660 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 693 queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
661 if (unlikely(!queue)) 694 if (unlikely(!queue))
662 return -EINVAL; 695 return -EINVAL;
663 696
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index ace0b668c04e..4dd82b0b0520 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -160,10 +160,9 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
160 /* 160 /*
161 * Register interrupt handler. 161 * Register interrupt handler.
162 */ 162 */
163 status = request_threaded_irq(rt2x00dev->irq, 163 status = request_irq(rt2x00dev->irq,
164 rt2x00dev->ops->lib->irq_handler, 164 rt2x00dev->ops->lib->irq_handler,
165 rt2x00dev->ops->lib->irq_handler_thread, 165 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
166 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
167 if (status) { 166 if (status) {
168 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", 167 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
169 rt2x00dev->irq, status); 168 rt2x00dev->irq, status);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index ca82b3a91697..4b3c70eeef1f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -221,14 +221,17 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
221 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); 221 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
222 unsigned long irqflags; 222 unsigned long irqflags;
223 223
224 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) || 224 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
225 unlikely(!tx_info->control.vif)) 225 return;
226
227 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
228
229 if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
226 return; 230 return;
227 231
228 /* 232 /*
229 * Hardware should insert sequence counter. 233 * The hardware is not able to insert a sequence number. Assign a
230 * FIXME: We insert a software sequence counter first for 234 * software generated one here.
231 * hardware that doesn't support hardware sequence counting.
232 * 235 *
233 * This is wrong because beacons are not getting sequence 236 * This is wrong because beacons are not getting sequence
234 * numbers assigned properly. 237 * numbers assigned properly.
@@ -246,7 +249,6 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
246 249
247 spin_unlock_irqrestore(&intf->seqlock, irqflags); 250 spin_unlock_irqrestore(&intf->seqlock, irqflags);
248 251
249 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
250} 252}
251 253
252static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, 254static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
@@ -260,6 +262,16 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
260 unsigned int duration; 262 unsigned int duration;
261 unsigned int residual; 263 unsigned int residual;
262 264
265 /*
266 * Determine with what IFS priority this frame should be send.
267 * Set ifs to IFS_SIFS when the this is not the first fragment,
268 * or this fragment came after RTS/CTS.
269 */
270 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
271 txdesc->u.plcp.ifs = IFS_BACKOFF;
272 else
273 txdesc->u.plcp.ifs = IFS_SIFS;
274
263 /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ 275 /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
264 data_length = entry->skb->len + 4; 276 data_length = entry->skb->len + 4;
265 data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb); 277 data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
@@ -268,12 +280,12 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
268 * PLCP setup 280 * PLCP setup
269 * Length calculation depends on OFDM/CCK rate. 281 * Length calculation depends on OFDM/CCK rate.
270 */ 282 */
271 txdesc->signal = hwrate->plcp; 283 txdesc->u.plcp.signal = hwrate->plcp;
272 txdesc->service = 0x04; 284 txdesc->u.plcp.service = 0x04;
273 285
274 if (hwrate->flags & DEV_RATE_OFDM) { 286 if (hwrate->flags & DEV_RATE_OFDM) {
275 txdesc->length_high = (data_length >> 6) & 0x3f; 287 txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
276 txdesc->length_low = data_length & 0x3f; 288 txdesc->u.plcp.length_low = data_length & 0x3f;
277 } else { 289 } else {
278 /* 290 /*
279 * Convert length to microseconds. 291 * Convert length to microseconds.
@@ -288,18 +300,18 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
288 * Check if we need to set the Length Extension 300 * Check if we need to set the Length Extension
289 */ 301 */
290 if (hwrate->bitrate == 110 && residual <= 30) 302 if (hwrate->bitrate == 110 && residual <= 30)
291 txdesc->service |= 0x80; 303 txdesc->u.plcp.service |= 0x80;
292 } 304 }
293 305
294 txdesc->length_high = (duration >> 8) & 0xff; 306 txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
295 txdesc->length_low = duration & 0xff; 307 txdesc->u.plcp.length_low = duration & 0xff;
296 308
297 /* 309 /*
298 * When preamble is enabled we should set the 310 * When preamble is enabled we should set the
299 * preamble bit for the signal. 311 * preamble bit for the signal.
300 */ 312 */
301 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 313 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
302 txdesc->signal |= 0x08; 314 txdesc->u.plcp.signal |= 0x08;
303 } 315 }
304} 316}
305 317
@@ -309,9 +321,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
309 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 321 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 322 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
311 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 323 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
312 struct ieee80211_rate *rate = 324 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
313 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 325 struct ieee80211_rate *rate;
314 const struct rt2x00_rate *hwrate; 326 const struct rt2x00_rate *hwrate = NULL;
315 327
316 memset(txdesc, 0, sizeof(*txdesc)); 328 memset(txdesc, 0, sizeof(*txdesc));
317 329
@@ -365,42 +377,42 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
365 377
366 /* 378 /*
367 * Beacons and probe responses require the tsf timestamp 379 * Beacons and probe responses require the tsf timestamp
368 * to be inserted into the frame, except for a frame that has been injected 380 * to be inserted into the frame.
369 * through a monitor interface. This latter is needed for testing a
370 * monitor interface.
371 */ 381 */
372 if ((ieee80211_is_beacon(hdr->frame_control) || 382 if (ieee80211_is_beacon(hdr->frame_control) ||
373 ieee80211_is_probe_resp(hdr->frame_control)) && 383 ieee80211_is_probe_resp(hdr->frame_control))
374 (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
375 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); 384 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
376 385
377 /*
378 * Determine with what IFS priority this frame should be send.
379 * Set ifs to IFS_SIFS when the this is not the first fragment,
380 * or this fragment came after RTS/CTS.
381 */
382 if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && 386 if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
383 !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) { 387 !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
384 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); 388 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
385 txdesc->ifs = IFS_BACKOFF;
386 } else
387 txdesc->ifs = IFS_SIFS;
388 389
389 /* 390 /*
390 * Determine rate modulation. 391 * Determine rate modulation.
391 */ 392 */
392 hwrate = rt2x00_get_rate(rate->hw_value); 393 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
393 txdesc->rate_mode = RATE_MODE_CCK; 394 txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
394 if (hwrate->flags & DEV_RATE_OFDM) 395 else if (txrate->flags & IEEE80211_TX_RC_MCS)
395 txdesc->rate_mode = RATE_MODE_OFDM; 396 txdesc->rate_mode = RATE_MODE_HT_MIX;
397 else {
398 rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
399 hwrate = rt2x00_get_rate(rate->hw_value);
400 if (hwrate->flags & DEV_RATE_OFDM)
401 txdesc->rate_mode = RATE_MODE_OFDM;
402 else
403 txdesc->rate_mode = RATE_MODE_CCK;
404 }
396 405
397 /* 406 /*
398 * Apply TX descriptor handling by components 407 * Apply TX descriptor handling by components
399 */ 408 */
400 rt2x00crypto_create_tx_descriptor(entry, txdesc); 409 rt2x00crypto_create_tx_descriptor(entry, txdesc);
401 rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
402 rt2x00queue_create_tx_descriptor_seq(entry, txdesc); 410 rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
403 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); 411
412 if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
413 rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
414 else
415 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
404} 416}
405 417
406static int rt2x00queue_write_tx_data(struct queue_entry *entry, 418static int rt2x00queue_write_tx_data(struct queue_entry *entry,
@@ -566,13 +578,10 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
566 return 0; 578 return 0;
567} 579}
568 580
569int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, 581int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
570 struct ieee80211_vif *vif, 582 struct ieee80211_vif *vif)
571 const bool enable_beacon)
572{ 583{
573 struct rt2x00_intf *intf = vif_to_intf(vif); 584 struct rt2x00_intf *intf = vif_to_intf(vif);
574 struct skb_frame_desc *skbdesc;
575 struct txentry_desc txdesc;
576 585
577 if (unlikely(!intf->beacon)) 586 if (unlikely(!intf->beacon))
578 return -ENOBUFS; 587 return -ENOBUFS;
@@ -584,17 +593,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
584 */ 593 */
585 rt2x00queue_free_skb(intf->beacon); 594 rt2x00queue_free_skb(intf->beacon);
586 595
587 if (!enable_beacon) { 596 /*
588 rt2x00queue_stop_queue(intf->beacon->queue); 597 * Clear beacon (single bssid devices don't need to clear the beacon
589 mutex_unlock(&intf->beacon_skb_mutex); 598 * since the beacon queue will get stopped anyway).
590 return 0; 599 */
591 } 600 if (rt2x00dev->ops->lib->clear_beacon)
601 rt2x00dev->ops->lib->clear_beacon(intf->beacon);
602
603 mutex_unlock(&intf->beacon_skb_mutex);
604
605 return 0;
606}
607
608int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
609 struct ieee80211_vif *vif)
610{
611 struct rt2x00_intf *intf = vif_to_intf(vif);
612 struct skb_frame_desc *skbdesc;
613 struct txentry_desc txdesc;
614
615 if (unlikely(!intf->beacon))
616 return -ENOBUFS;
617
618 /*
619 * Clean up the beacon skb.
620 */
621 rt2x00queue_free_skb(intf->beacon);
592 622
593 intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif); 623 intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
594 if (!intf->beacon->skb) { 624 if (!intf->beacon->skb)
595 mutex_unlock(&intf->beacon_skb_mutex);
596 return -ENOMEM; 625 return -ENOMEM;
597 }
598 626
599 /* 627 /*
600 * Copy all TX descriptor information into txdesc, 628 * Copy all TX descriptor information into txdesc,
@@ -611,13 +639,25 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
611 skbdesc->entry = intf->beacon; 639 skbdesc->entry = intf->beacon;
612 640
613 /* 641 /*
614 * Send beacon to hardware and enable beacon genaration.. 642 * Send beacon to hardware.
615 */ 643 */
616 rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); 644 rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
617 645
646 return 0;
647
648}
649
650int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
651 struct ieee80211_vif *vif)
652{
653 struct rt2x00_intf *intf = vif_to_intf(vif);
654 int ret;
655
656 mutex_lock(&intf->beacon_skb_mutex);
657 ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
618 mutex_unlock(&intf->beacon_skb_mutex); 658 mutex_unlock(&intf->beacon_skb_mutex);
619 659
620 return 0; 660 return ret;
621} 661}
622 662
623void rt2x00queue_for_each_entry(struct data_queue *queue, 663void rt2x00queue_for_each_entry(struct data_queue *queue,
@@ -665,29 +705,6 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
665} 705}
666EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); 706EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
667 707
668struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
669 const enum data_queue_qid queue)
670{
671 int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
672
673 if (queue == QID_RX)
674 return rt2x00dev->rx;
675
676 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
677 return &rt2x00dev->tx[queue];
678
679 if (!rt2x00dev->bcn)
680 return NULL;
681
682 if (queue == QID_BEACON)
683 return &rt2x00dev->bcn[0];
684 else if (queue == QID_ATIM && atim)
685 return &rt2x00dev->bcn[1];
686
687 return NULL;
688}
689EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
690
691struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, 708struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
692 enum queue_index index) 709 enum queue_index index)
693{ 710{
@@ -885,7 +902,7 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
885 * The queue flush has failed... 902 * The queue flush has failed...
886 */ 903 */
887 if (unlikely(!rt2x00queue_empty(queue))) 904 if (unlikely(!rt2x00queue_empty(queue)))
888 WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid); 905 WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
889 906
890 /* 907 /*
891 * Restore the queue to the previous status 908 * Restore the queue to the previous status
@@ -1063,7 +1080,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1063 goto exit; 1080 goto exit;
1064 1081
1065 if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) { 1082 if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
1066 status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1], 1083 status = rt2x00queue_alloc_entries(rt2x00dev->atim,
1067 rt2x00dev->ops->atim); 1084 rt2x00dev->ops->atim);
1068 if (status) 1085 if (status)
1069 goto exit; 1086 goto exit;
@@ -1137,6 +1154,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1137 rt2x00dev->rx = queue; 1154 rt2x00dev->rx = queue;
1138 rt2x00dev->tx = &queue[1]; 1155 rt2x00dev->tx = &queue[1];
1139 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; 1156 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
1157 rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
1140 1158
1141 /* 1159 /*
1142 * Initialize queue parameters. 1160 * Initialize queue parameters.
@@ -1153,9 +1171,9 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1153 tx_queue_for_each(rt2x00dev, queue) 1171 tx_queue_for_each(rt2x00dev, queue)
1154 rt2x00queue_init(rt2x00dev, queue, qid++); 1172 rt2x00queue_init(rt2x00dev, queue, qid++);
1155 1173
1156 rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON); 1174 rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
1157 if (req_atim) 1175 if (req_atim)
1158 rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM); 1176 rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
1159 1177
1160 return 0; 1178 return 0;
1161} 1179}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index fab8e2687f29..0c8b0c699679 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -305,20 +305,27 @@ struct txentry_desc {
305 u16 length; 305 u16 length;
306 u16 header_length; 306 u16 header_length;
307 307
308 u16 length_high; 308 union {
309 u16 length_low; 309 struct {
310 u16 signal; 310 u16 length_high;
311 u16 service; 311 u16 length_low;
312 312 u16 signal;
313 u16 mcs; 313 u16 service;
314 u16 stbc; 314 enum ifs ifs;
315 u16 ba_size; 315 } plcp;
316 u16 rate_mode; 316
317 u16 mpdu_density; 317 struct {
318 u16 mcs;
319 u8 stbc;
320 u8 ba_size;
321 u8 mpdu_density;
322 enum txop txop;
323 } ht;
324 } u;
325
326 enum rate_modulation rate_mode;
318 327
319 short retry_limit; 328 short retry_limit;
320 short ifs;
321 short txop;
322 329
323 enum cipher cipher; 330 enum cipher cipher;
324 u16 key_idx; 331 u16 key_idx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index e8259ae48ced..6f867eec49cc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -85,8 +85,6 @@ enum dev_state {
85 STATE_RADIO_OFF, 85 STATE_RADIO_OFF,
86 STATE_RADIO_IRQ_ON, 86 STATE_RADIO_IRQ_ON,
87 STATE_RADIO_IRQ_OFF, 87 STATE_RADIO_IRQ_OFF,
88 STATE_RADIO_IRQ_ON_ISR,
89 STATE_RADIO_IRQ_OFF_ISR,
90}; 88};
91 89
92/* 90/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1a9937d5aff6..fbe735f5b352 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -227,7 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
227 * Schedule the delayed work for reading the TX status 227 * Schedule the delayed work for reading the TX status
228 * from the device. 228 * from the device.
229 */ 229 */
230 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work); 230 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
231} 231}
232 232
233static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) 233static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -320,7 +320,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
320 * Schedule the delayed work for reading the RX status 320 * Schedule the delayed work for reading the RX status
321 * from the device. 321 * from the device.
322 */ 322 */
323 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work); 323 queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
324} 324}
325 325
326static void rt2x00usb_kick_rx_entry(struct queue_entry *entry) 326static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
@@ -429,7 +429,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue)
429 * Schedule the completion handler manually, when this 429 * Schedule the completion handler manually, when this
430 * worker function runs, it should cleanup the queue. 430 * worker function runs, it should cleanup the queue.
431 */ 431 */
432 ieee80211_queue_work(queue->rt2x00dev->hw, completion); 432 queue_work(queue->rt2x00dev->workqueue, completion);
433 433
434 /* 434 /*
435 * Wait for a little while to give the driver 435 * Wait for a little while to give the driver
@@ -453,7 +453,7 @@ static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
453 WARNING(queue->rt2x00dev, "TX queue %d status timed out," 453 WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
454 " invoke forced tx handler\n", queue->qid); 454 " invoke forced tx handler\n", queue->qid);
455 455
456 ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work); 456 queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
457} 457}
458 458
459void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) 459void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 8de44dd401e0..77e8113b91e1 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -551,26 +551,14 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
551 struct rt2x00intf_conf *conf, 551 struct rt2x00intf_conf *conf,
552 const unsigned int flags) 552 const unsigned int flags)
553{ 553{
554 unsigned int beacon_base;
555 u32 reg; 554 u32 reg;
556 555
557 if (flags & CONFIG_UPDATE_TYPE) { 556 if (flags & CONFIG_UPDATE_TYPE) {
558 /* 557 /*
559 * Clear current synchronisation setup.
560 * For the Beacon base registers, we only need to clear
561 * the first byte since that byte contains the VALID and OWNER
562 * bits which (when set to 0) will invalidate the entire beacon.
563 */
564 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
565 rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
566
567 /*
568 * Enable synchronisation. 558 * Enable synchronisation.
569 */ 559 */
570 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 560 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
571 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
572 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); 561 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
573 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
574 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 562 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
575 } 563 }
576 564
@@ -1154,6 +1142,11 @@ static void rt61pci_start_queue(struct data_queue *queue)
1154 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 1142 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
1155 break; 1143 break;
1156 case QID_BEACON: 1144 case QID_BEACON:
1145 /*
1146 * Allow the tbtt tasklet to be scheduled.
1147 */
1148 tasklet_enable(&rt2x00dev->tbtt_tasklet);
1149
1157 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1150 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
1158 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); 1151 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1159 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1); 1152 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
@@ -1233,6 +1226,11 @@ static void rt61pci_stop_queue(struct data_queue *queue)
1233 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); 1226 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
1234 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1227 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1235 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1228 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1229
1230 /*
1231 * Wait for possibly running tbtt tasklets.
1232 */
1233 tasklet_disable(&rt2x00dev->tbtt_tasklet);
1236 break; 1234 break;
1237 default: 1235 default:
1238 break; 1236 break;
@@ -1719,9 +1717,9 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev)
1719static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 1717static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1720 enum dev_state state) 1718 enum dev_state state)
1721{ 1719{
1722 int mask = (state == STATE_RADIO_IRQ_OFF) || 1720 int mask = (state == STATE_RADIO_IRQ_OFF);
1723 (state == STATE_RADIO_IRQ_OFF_ISR);
1724 u32 reg; 1721 u32 reg;
1722 unsigned long flags;
1725 1723
1726 /* 1724 /*
1727 * When interrupts are being enabled, the interrupt registers 1725 * When interrupts are being enabled, the interrupt registers
@@ -1733,12 +1731,21 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1733 1731
1734 rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg); 1732 rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
1735 rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg); 1733 rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
1734
1735 /*
1736 * Enable tasklets.
1737 */
1738 tasklet_enable(&rt2x00dev->txstatus_tasklet);
1739 tasklet_enable(&rt2x00dev->rxdone_tasklet);
1740 tasklet_enable(&rt2x00dev->autowake_tasklet);
1736 } 1741 }
1737 1742
1738 /* 1743 /*
1739 * Only toggle the interrupts bits we are going to use. 1744 * Only toggle the interrupts bits we are going to use.
1740 * Non-checked interrupt bits are disabled by default. 1745 * Non-checked interrupt bits are disabled by default.
1741 */ 1746 */
1747 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1748
1742 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 1749 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
1743 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask); 1750 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
1744 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask); 1751 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
@@ -1758,6 +1765,17 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1758 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask); 1765 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
1759 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask); 1766 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
1760 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 1767 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
1768
1769 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1770
1771 if (state == STATE_RADIO_IRQ_OFF) {
1772 /*
1773 * Ensure that all tasklets are finished.
1774 */
1775 tasklet_disable(&rt2x00dev->txstatus_tasklet);
1776 tasklet_disable(&rt2x00dev->rxdone_tasklet);
1777 tasklet_disable(&rt2x00dev->autowake_tasklet);
1778 }
1761} 1779}
1762 1780
1763static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev) 1781static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1833,9 +1851,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1833 rt61pci_disable_radio(rt2x00dev); 1851 rt61pci_disable_radio(rt2x00dev);
1834 break; 1852 break;
1835 case STATE_RADIO_IRQ_ON: 1853 case STATE_RADIO_IRQ_ON:
1836 case STATE_RADIO_IRQ_ON_ISR:
1837 case STATE_RADIO_IRQ_OFF: 1854 case STATE_RADIO_IRQ_OFF:
1838 case STATE_RADIO_IRQ_OFF_ISR:
1839 rt61pci_toggle_irq(rt2x00dev, state); 1855 rt61pci_toggle_irq(rt2x00dev, state);
1840 break; 1856 break;
1841 case STATE_DEEP_SLEEP: 1857 case STATE_DEEP_SLEEP:
@@ -1882,10 +1898,12 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1882 rt2x00_desc_write(txd, 1, word); 1898 rt2x00_desc_write(txd, 1, word);
1883 1899
1884 rt2x00_desc_read(txd, 2, &word); 1900 rt2x00_desc_read(txd, 2, &word);
1885 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); 1901 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
1886 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); 1902 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
1887 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); 1903 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
1888 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1904 txdesc->u.plcp.length_low);
1905 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
1906 txdesc->u.plcp.length_high);
1889 rt2x00_desc_write(txd, 2, word); 1907 rt2x00_desc_write(txd, 2, word);
1890 1908
1891 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { 1909 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@@ -1930,7 +1948,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1930 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1948 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1931 rt2x00_set_field32(&word, TXD_W0_OFDM, 1949 rt2x00_set_field32(&word, TXD_W0_OFDM,
1932 (txdesc->rate_mode == RATE_MODE_OFDM)); 1950 (txdesc->rate_mode == RATE_MODE_OFDM));
1933 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1951 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
1934 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1952 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1935 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1953 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1936 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 1954 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
@@ -1962,13 +1980,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1962 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1980 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1963 unsigned int beacon_base; 1981 unsigned int beacon_base;
1964 unsigned int padding_len; 1982 unsigned int padding_len;
1965 u32 reg; 1983 u32 orig_reg, reg;
1966 1984
1967 /* 1985 /*
1968 * Disable beaconing while we are reloading the beacon data, 1986 * Disable beaconing while we are reloading the beacon data,
1969 * otherwise we might be sending out invalid data. 1987 * otherwise we might be sending out invalid data.
1970 */ 1988 */
1971 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1989 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
1990 orig_reg = reg;
1972 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1991 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1973 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1992 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1974 1993
@@ -1986,7 +2005,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1986 * Write entire beacon with descriptor and padding to register. 2005 * Write entire beacon with descriptor and padding to register.
1987 */ 2006 */
1988 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 2007 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
1989 skb_pad(entry->skb, padding_len); 2008 if (padding_len && skb_pad(entry->skb, padding_len)) {
2009 ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
2010 /* skb freed by skb_pad() on failure */
2011 entry->skb = NULL;
2012 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
2013 return;
2014 }
2015
1990 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 2016 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1991 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 2017 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
1992 entry_priv->desc, TXINFO_SIZE); 2018 entry_priv->desc, TXINFO_SIZE);
@@ -2002,8 +2028,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
2002 */ 2028 */
2003 rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); 2029 rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
2004 2030
2005 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
2006 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
2007 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 2031 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
2008 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 2032 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2009 2033
@@ -2014,6 +2038,32 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
2014 entry->skb = NULL; 2038 entry->skb = NULL;
2015} 2039}
2016 2040
2041static void rt61pci_clear_beacon(struct queue_entry *entry)
2042{
2043 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
2044 u32 reg;
2045
2046 /*
2047 * Disable beaconing while we are reloading the beacon data,
2048 * otherwise we might be sending out invalid data.
2049 */
2050 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
2051 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
2052 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2053
2054 /*
2055 * Clear beacon.
2056 */
2057 rt2x00pci_register_write(rt2x00dev,
2058 HW_BEACON_OFFSET(entry->entry_idx), 0);
2059
2060 /*
2061 * Enable beaconing again.
2062 */
2063 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
2064 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2065}
2066
2017/* 2067/*
2018 * RX control handlers 2068 * RX control handlers
2019 */ 2069 */
@@ -2078,9 +2128,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
2078 rxdesc->flags |= RX_FLAG_IV_STRIPPED; 2128 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
2079 2129
2080 /* 2130 /*
2081 * FIXME: Legacy driver indicates that the frame does 2131 * The hardware has already checked the Michael Mic and has
2082 * contain the Michael Mic. Unfortunately, in rt2x00 2132 * stripped it from the frame. Signal this to mac80211.
2083 * the MIC seems to be missing completely...
2084 */ 2133 */
2085 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; 2134 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
2086 2135
@@ -2143,7 +2192,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2143 * queue identication number. 2192 * queue identication number.
2144 */ 2193 */
2145 type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE); 2194 type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE);
2146 queue = rt2x00queue_get_queue(rt2x00dev, type); 2195 queue = rt2x00queue_get_tx_queue(rt2x00dev, type);
2147 if (unlikely(!queue)) 2196 if (unlikely(!queue))
2148 continue; 2197 continue;
2149 2198
@@ -2211,61 +2260,77 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
2211 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 2260 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
2212} 2261}
2213 2262
2214static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance) 2263static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
2264 struct rt2x00_field32 irq_field)
2215{ 2265{
2216 struct rt2x00_dev *rt2x00dev = dev_instance; 2266 u32 reg;
2217 u32 reg = rt2x00dev->irqvalue[0];
2218 u32 reg_mcu = rt2x00dev->irqvalue[1];
2219 2267
2220 /* 2268 /*
2221 * Handle interrupts, walk through all bits 2269 * Enable a single interrupt. The interrupt mask register
2222 * and run the tasks, the bits are checked in order of 2270 * access needs locking.
2223 * priority.
2224 */ 2271 */
2272 spin_lock_irq(&rt2x00dev->irqmask_lock);
2225 2273
2226 /* 2274 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2227 * 1 - Rx ring done interrupt. 2275 rt2x00_set_field32(&reg, irq_field, 0);
2228 */ 2276 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
2229 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
2230 rt2x00pci_rxdone(rt2x00dev);
2231 2277
2232 /* 2278 spin_unlock_irq(&rt2x00dev->irqmask_lock);
2233 * 2 - Tx ring done interrupt. 2279}
2234 */
2235 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
2236 rt61pci_txdone(rt2x00dev);
2237 2280
2238 /* 2281static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
2239 * 3 - Handle MCU command done. 2282 struct rt2x00_field32 irq_field)
2240 */ 2283{
2241 if (reg_mcu) 2284 u32 reg;
2242 rt2x00pci_register_write(rt2x00dev,
2243 M2H_CMD_DONE_CSR, 0xffffffff);
2244 2285
2245 /* 2286 /*
2246 * 4 - MCU Autowakeup interrupt. 2287 * Enable a single MCU interrupt. The interrupt mask register
2288 * access needs locking.
2247 */ 2289 */
2248 if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP)) 2290 spin_lock_irq(&rt2x00dev->irqmask_lock);
2249 rt61pci_wakeup(rt2x00dev);
2250 2291
2251 /* 2292 rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
2252 * 5 - Beacon done interrupt. 2293 rt2x00_set_field32(&reg, irq_field, 0);
2253 */ 2294 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
2254 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
2255 rt2x00lib_beacondone(rt2x00dev);
2256 2295
2257 /* Enable interrupts again. */ 2296 spin_unlock_irq(&rt2x00dev->irqmask_lock);
2258 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 2297}
2259 STATE_RADIO_IRQ_ON_ISR); 2298
2260 return IRQ_HANDLED; 2299static void rt61pci_txstatus_tasklet(unsigned long data)
2300{
2301 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2302 rt61pci_txdone(rt2x00dev);
2303 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
2304}
2305
2306static void rt61pci_tbtt_tasklet(unsigned long data)
2307{
2308 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2309 rt2x00lib_beacondone(rt2x00dev);
2310 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
2311}
2312
2313static void rt61pci_rxdone_tasklet(unsigned long data)
2314{
2315 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2316 rt2x00pci_rxdone(rt2x00dev);
2317 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
2261} 2318}
2262 2319
2320static void rt61pci_autowake_tasklet(unsigned long data)
2321{
2322 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2323 rt61pci_wakeup(rt2x00dev);
2324 rt2x00pci_register_write(rt2x00dev,
2325 M2H_CMD_DONE_CSR, 0xffffffff);
2326 rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
2327}
2263 2328
2264static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) 2329static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2265{ 2330{
2266 struct rt2x00_dev *rt2x00dev = dev_instance; 2331 struct rt2x00_dev *rt2x00dev = dev_instance;
2267 u32 reg_mcu; 2332 u32 reg_mcu, mask_mcu;
2268 u32 reg; 2333 u32 reg, mask;
2269 2334
2270 /* 2335 /*
2271 * Get the interrupt sources & saved to local variable. 2336 * Get the interrupt sources & saved to local variable.
@@ -2283,14 +2348,46 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2283 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 2348 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
2284 return IRQ_HANDLED; 2349 return IRQ_HANDLED;
2285 2350
2286 /* Store irqvalues for use in the interrupt thread. */ 2351 /*
2287 rt2x00dev->irqvalue[0] = reg; 2352 * Schedule tasklets for interrupt handling.
2288 rt2x00dev->irqvalue[1] = reg_mcu; 2353 */
2354 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
2355 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
2356
2357 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
2358 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
2359
2360 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
2361 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
2362
2363 if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
2364 tasklet_schedule(&rt2x00dev->autowake_tasklet);
2365
2366 /*
2367 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
2368 * for interrupts and interrupt masks we can just use the value of
2369 * INT_SOURCE_CSR to create the interrupt mask.
2370 */
2371 mask = reg;
2372 mask_mcu = reg_mcu;
2289 2373
2290 /* Disable interrupts, will be enabled again in the interrupt thread. */ 2374 /*
2291 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 2375 * Disable all interrupts for which a tasklet was scheduled right now,
2292 STATE_RADIO_IRQ_OFF_ISR); 2376 * the tasklet will reenable the appropriate interrupts.
2293 return IRQ_WAKE_THREAD; 2377 */
2378 spin_lock(&rt2x00dev->irqmask_lock);
2379
2380 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2381 reg |= mask;
2382 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
2383
2384 rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
2385 reg |= mask_mcu;
2386 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
2387
2388 spin_unlock(&rt2x00dev->irqmask_lock);
2389
2390 return IRQ_HANDLED;
2294} 2391}
2295 2392
2296/* 2393/*
@@ -2819,7 +2916,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2819 if (queue_idx >= 4) 2916 if (queue_idx >= 4)
2820 return 0; 2917 return 0;
2821 2918
2822 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 2919 queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
2823 2920
2824 /* Update WMM TXOP register */ 2921 /* Update WMM TXOP register */
2825 offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); 2922 offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -2884,7 +2981,10 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2884 2981
2885static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { 2982static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2886 .irq_handler = rt61pci_interrupt, 2983 .irq_handler = rt61pci_interrupt,
2887 .irq_handler_thread = rt61pci_interrupt_thread, 2984 .txstatus_tasklet = rt61pci_txstatus_tasklet,
2985 .tbtt_tasklet = rt61pci_tbtt_tasklet,
2986 .rxdone_tasklet = rt61pci_rxdone_tasklet,
2987 .autowake_tasklet = rt61pci_autowake_tasklet,
2888 .probe_hw = rt61pci_probe_hw, 2988 .probe_hw = rt61pci_probe_hw,
2889 .get_firmware_name = rt61pci_get_firmware_name, 2989 .get_firmware_name = rt61pci_get_firmware_name,
2890 .check_firmware = rt61pci_check_firmware, 2990 .check_firmware = rt61pci_check_firmware,
@@ -2903,6 +3003,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2903 .stop_queue = rt61pci_stop_queue, 3003 .stop_queue = rt61pci_stop_queue,
2904 .write_tx_desc = rt61pci_write_tx_desc, 3004 .write_tx_desc = rt61pci_write_tx_desc,
2905 .write_beacon = rt61pci_write_beacon, 3005 .write_beacon = rt61pci_write_beacon,
3006 .clear_beacon = rt61pci_clear_beacon,
2906 .fill_rxdone = rt61pci_fill_rxdone, 3007 .fill_rxdone = rt61pci_fill_rxdone,
2907 .config_shared_key = rt61pci_config_shared_key, 3008 .config_shared_key = rt61pci_config_shared_key,
2908 .config_pairwise_key = rt61pci_config_pairwise_key, 3009 .config_pairwise_key = rt61pci_config_pairwise_key,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 029be3c6c030..02f1148c577e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -502,26 +502,14 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
502 struct rt2x00intf_conf *conf, 502 struct rt2x00intf_conf *conf,
503 const unsigned int flags) 503 const unsigned int flags)
504{ 504{
505 unsigned int beacon_base;
506 u32 reg; 505 u32 reg;
507 506
508 if (flags & CONFIG_UPDATE_TYPE) { 507 if (flags & CONFIG_UPDATE_TYPE) {
509 /* 508 /*
510 * Clear current synchronisation setup.
511 * For the Beacon base registers we only need to clear
512 * the first byte since that byte contains the VALID and OWNER
513 * bits which (when set to 0) will invalidate the entire beacon.
514 */
515 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
516 rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
517
518 /*
519 * Enable synchronisation. 509 * Enable synchronisation.
520 */ 510 */
521 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 511 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
522 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
523 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); 512 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
524 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
525 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 513 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
526 } 514 }
527 515
@@ -1440,9 +1428,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1440 rt73usb_disable_radio(rt2x00dev); 1428 rt73usb_disable_radio(rt2x00dev);
1441 break; 1429 break;
1442 case STATE_RADIO_IRQ_ON: 1430 case STATE_RADIO_IRQ_ON:
1443 case STATE_RADIO_IRQ_ON_ISR:
1444 case STATE_RADIO_IRQ_OFF: 1431 case STATE_RADIO_IRQ_OFF:
1445 case STATE_RADIO_IRQ_OFF_ISR:
1446 /* No support, but no error either */ 1432 /* No support, but no error either */
1447 break; 1433 break;
1448 case STATE_DEEP_SLEEP: 1434 case STATE_DEEP_SLEEP:
@@ -1488,7 +1474,7 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
1488 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1474 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1489 rt2x00_set_field32(&word, TXD_W0_OFDM, 1475 rt2x00_set_field32(&word, TXD_W0_OFDM,
1490 (txdesc->rate_mode == RATE_MODE_OFDM)); 1476 (txdesc->rate_mode == RATE_MODE_OFDM));
1491 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1477 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
1492 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1478 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1493 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1479 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1494 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 1480 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
@@ -1513,10 +1499,12 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
1513 rt2x00_desc_write(txd, 1, word); 1499 rt2x00_desc_write(txd, 1, word);
1514 1500
1515 rt2x00_desc_read(txd, 2, &word); 1501 rt2x00_desc_read(txd, 2, &word);
1516 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); 1502 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
1517 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); 1503 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
1518 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); 1504 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
1519 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1505 txdesc->u.plcp.length_low);
1506 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
1507 txdesc->u.plcp.length_high);
1520 rt2x00_desc_write(txd, 2, word); 1508 rt2x00_desc_write(txd, 2, word);
1521 1509
1522 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { 1510 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@@ -1547,13 +1535,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1547 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1535 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1548 unsigned int beacon_base; 1536 unsigned int beacon_base;
1549 unsigned int padding_len; 1537 unsigned int padding_len;
1550 u32 reg; 1538 u32 orig_reg, reg;
1551 1539
1552 /* 1540 /*
1553 * Disable beaconing while we are reloading the beacon data, 1541 * Disable beaconing while we are reloading the beacon data,
1554 * otherwise we might be sending out invalid data. 1542 * otherwise we might be sending out invalid data.
1555 */ 1543 */
1556 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 1544 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1545 orig_reg = reg;
1557 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1546 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1558 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1547 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1559 1548
@@ -1577,7 +1566,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1577 * Write entire beacon with descriptor and padding to register. 1566 * Write entire beacon with descriptor and padding to register.
1578 */ 1567 */
1579 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 1568 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
1580 skb_pad(entry->skb, padding_len); 1569 if (padding_len && skb_pad(entry->skb, padding_len)) {
1570 ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
1571 /* skb freed by skb_pad() on failure */
1572 entry->skb = NULL;
1573 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
1574 return;
1575 }
1576
1581 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1577 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1582 rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, 1578 rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
1583 entry->skb->len + padding_len); 1579 entry->skb->len + padding_len);
@@ -1590,8 +1586,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1590 */ 1586 */
1591 rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); 1587 rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
1592 1588
1593 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1594 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1595 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 1589 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1596 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1590 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1597 1591
@@ -1602,6 +1596,33 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1602 entry->skb = NULL; 1596 entry->skb = NULL;
1603} 1597}
1604 1598
1599static void rt73usb_clear_beacon(struct queue_entry *entry)
1600{
1601 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1602 unsigned int beacon_base;
1603 u32 reg;
1604
1605 /*
1606 * Disable beaconing while we are reloading the beacon data,
1607 * otherwise we might be sending out invalid data.
1608 */
1609 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1610 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1611 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1612
1613 /*
1614 * Clear beacon.
1615 */
1616 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1617 rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
1618
1619 /*
1620 * Enable beaconing again.
1621 */
1622 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1623 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1624}
1625
1605static int rt73usb_get_tx_data_len(struct queue_entry *entry) 1626static int rt73usb_get_tx_data_len(struct queue_entry *entry)
1606{ 1627{
1607 int length; 1628 int length;
@@ -1698,9 +1719,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1698 rxdesc->flags |= RX_FLAG_IV_STRIPPED; 1719 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
1699 1720
1700 /* 1721 /*
1701 * FIXME: Legacy driver indicates that the frame does 1722 * The hardware has already checked the Michael Mic and has
1702 * contain the Michael Mic. Unfortunately, in rt2x00 1723 * stripped it from the frame. Signal this to mac80211.
1703 * the MIC seems to be missing completely...
1704 */ 1724 */
1705 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; 1725 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1706 1726
@@ -2229,7 +2249,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2229 if (queue_idx >= 4) 2249 if (queue_idx >= 4)
2230 return 0; 2250 return 0;
2231 2251
2232 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 2252 queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
2233 2253
2234 /* Update WMM TXOP register */ 2254 /* Update WMM TXOP register */
2235 offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); 2255 offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -2313,6 +2333,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2313 .flush_queue = rt2x00usb_flush_queue, 2333 .flush_queue = rt2x00usb_flush_queue,
2314 .write_tx_desc = rt73usb_write_tx_desc, 2334 .write_tx_desc = rt73usb_write_tx_desc,
2315 .write_beacon = rt73usb_write_beacon, 2335 .write_beacon = rt73usb_write_beacon,
2336 .clear_beacon = rt73usb_clear_beacon,
2316 .get_tx_data_len = rt73usb_get_tx_data_len, 2337 .get_tx_data_len = rt73usb_get_tx_data_len,
2317 .fill_rxdone = rt73usb_fill_rxdone, 2338 .fill_rxdone = rt73usb_fill_rxdone,
2318 .config_shared_key = rt73usb_config_shared_key, 2339 .config_shared_key = rt73usb_config_shared_key,
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 5851cbc1e957..80db5cabc9b9 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -146,7 +146,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
146 rx_status.freq = dev->conf.channel->center_freq; 146 rx_status.freq = dev->conf.channel->center_freq;
147 rx_status.band = dev->conf.channel->band; 147 rx_status.band = dev->conf.channel->band;
148 rx_status.mactime = le64_to_cpu(entry->tsft); 148 rx_status.mactime = le64_to_cpu(entry->tsft);
149 rx_status.flag |= RX_FLAG_TSFT; 149 rx_status.flag |= RX_FLAG_MACTIME_MPDU;
150 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 150 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
151 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 151 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
152 152
@@ -240,7 +240,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
240 return IRQ_HANDLED; 240 return IRQ_HANDLED;
241} 241}
242 242
243static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 243static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
244{ 244{
245 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 245 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
246 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 246 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -321,8 +321,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
321 spin_unlock_irqrestore(&priv->lock, flags); 321 spin_unlock_irqrestore(&priv->lock, flags);
322 322
323 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 323 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
324
325 return 0;
326} 324}
327 325
328void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam) 326void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -687,7 +685,6 @@ static void rtl8180_beacon_work(struct work_struct *work)
687 struct ieee80211_hw *dev = vif_priv->dev; 685 struct ieee80211_hw *dev = vif_priv->dev;
688 struct ieee80211_mgmt *mgmt; 686 struct ieee80211_mgmt *mgmt;
689 struct sk_buff *skb; 687 struct sk_buff *skb;
690 int err = 0;
691 688
692 /* don't overflow the tx ring */ 689 /* don't overflow the tx ring */
693 if (ieee80211_queue_stopped(dev, 0)) 690 if (ieee80211_queue_stopped(dev, 0))
@@ -708,8 +705,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
708 /* TODO: use actual beacon queue */ 705 /* TODO: use actual beacon queue */
709 skb_set_queue_mapping(skb, 0); 706 skb_set_queue_mapping(skb, 0);
710 707
711 err = rtl8180_tx(dev, skb); 708 rtl8180_tx(dev, skb);
712 WARN_ON(err);
713 709
714resched: 710resched:
715 /* 711 /*
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 6b82cac37ee3..1e0be14d10d4 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -227,7 +227,7 @@ static void rtl8187_tx_cb(struct urb *urb)
227 } 227 }
228} 228}
229 229
230static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 230static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
231{ 231{
232 struct rtl8187_priv *priv = dev->priv; 232 struct rtl8187_priv *priv = dev->priv;
233 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 233 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -241,7 +241,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
241 urb = usb_alloc_urb(0, GFP_ATOMIC); 241 urb = usb_alloc_urb(0, GFP_ATOMIC);
242 if (!urb) { 242 if (!urb) {
243 kfree_skb(skb); 243 kfree_skb(skb);
244 return NETDEV_TX_OK; 244 return;
245 } 245 }
246 246
247 flags = skb->len; 247 flags = skb->len;
@@ -309,8 +309,6 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
309 kfree_skb(skb); 309 kfree_skb(skb);
310 } 310 }
311 usb_free_urb(urb); 311 usb_free_urb(urb);
312
313 return NETDEV_TX_OK;
314} 312}
315 313
316static void rtl8187_rx_cb(struct urb *urb) 314static void rtl8187_rx_cb(struct urb *urb)
@@ -373,7 +371,7 @@ static void rtl8187_rx_cb(struct urb *urb)
373 rx_status.rate_idx = rate; 371 rx_status.rate_idx = rate;
374 rx_status.freq = dev->conf.channel->center_freq; 372 rx_status.freq = dev->conf.channel->center_freq;
375 rx_status.band = dev->conf.channel->band; 373 rx_status.band = dev->conf.channel->band;
376 rx_status.flag |= RX_FLAG_TSFT; 374 rx_status.flag |= RX_FLAG_MACTIME_MPDU;
377 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 375 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
378 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 376 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
379 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 377 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
@@ -871,23 +869,35 @@ static void rtl8187_work(struct work_struct *work)
871 /* The RTL8187 returns the retry count through register 0xFFFA. In 869 /* The RTL8187 returns the retry count through register 0xFFFA. In
872 * addition, it appears to be a cumulative retry count, not the 870 * addition, it appears to be a cumulative retry count, not the
873 * value for the current TX packet. When multiple TX entries are 871 * value for the current TX packet. When multiple TX entries are
874 * queued, the retry count will be valid for the last one in the queue. 872 * waiting in the queue, the retry count will be the total for all.
875 * The "error" should not matter for purposes of rate setting. */ 873 * The "error" may matter for purposes of rate setting, but there is
874 * no other choice with this hardware.
875 */
876 struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, 876 struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
877 work.work); 877 work.work);
878 struct ieee80211_tx_info *info; 878 struct ieee80211_tx_info *info;
879 struct ieee80211_hw *dev = priv->dev; 879 struct ieee80211_hw *dev = priv->dev;
880 static u16 retry; 880 static u16 retry;
881 u16 tmp; 881 u16 tmp;
882 u16 avg_retry;
883 int length;
882 884
883 mutex_lock(&priv->conf_mutex); 885 mutex_lock(&priv->conf_mutex);
884 tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA); 886 tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA);
887 length = skb_queue_len(&priv->b_tx_status.queue);
888 if (unlikely(!length))
889 length = 1;
890 if (unlikely(tmp < retry))
891 tmp = retry;
892 avg_retry = (tmp - retry) / length;
885 while (skb_queue_len(&priv->b_tx_status.queue) > 0) { 893 while (skb_queue_len(&priv->b_tx_status.queue) > 0) {
886 struct sk_buff *old_skb; 894 struct sk_buff *old_skb;
887 895
888 old_skb = skb_dequeue(&priv->b_tx_status.queue); 896 old_skb = skb_dequeue(&priv->b_tx_status.queue);
889 info = IEEE80211_SKB_CB(old_skb); 897 info = IEEE80211_SKB_CB(old_skb);
890 info->status.rates[0].count = tmp - retry + 1; 898 info->status.rates[0].count = avg_retry + 1;
899 if (info->status.rates[0].count > RETRY_COUNT)
900 info->flags &= ~IEEE80211_TX_STAT_ACK;
891 ieee80211_tx_status_irqsafe(dev, old_skb); 901 ieee80211_tx_status_irqsafe(dev, old_skb);
892 } 902 }
893 retry = tmp; 903 retry = tmp;
@@ -933,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
933 rtl818x_iowrite32(priv, &priv->map->TX_CONF, 943 rtl818x_iowrite32(priv, &priv->map->TX_CONF,
934 RTL818X_TX_CONF_HW_SEQNUM | 944 RTL818X_TX_CONF_HW_SEQNUM |
935 RTL818X_TX_CONF_DISREQQSIZE | 945 RTL818X_TX_CONF_DISREQQSIZE |
936 (7 << 8 /* short retry limit */) | 946 (RETRY_COUNT << 8 /* short retry limit */) |
937 (7 << 0 /* long retry limit */) | 947 (RETRY_COUNT << 0 /* long retry limit */) |
938 (7 << 21 /* MAX TX DMA */)); 948 (7 << 21 /* MAX TX DMA */));
939 rtl8187_init_urbs(dev); 949 rtl8187_init_urbs(dev);
940 rtl8187b_init_status_urb(dev); 950 rtl8187b_init_status_urb(dev);
@@ -1378,6 +1388,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1378 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1388 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1379 IEEE80211_HW_SIGNAL_DBM | 1389 IEEE80211_HW_SIGNAL_DBM |
1380 IEEE80211_HW_RX_INCLUDES_FCS; 1390 IEEE80211_HW_RX_INCLUDES_FCS;
1391 /* Initialize rate-control variables */
1392 dev->max_rates = 1;
1393 dev->max_rate_tries = RETRY_COUNT;
1381 1394
1382 eeprom.data = dev; 1395 eeprom.data = dev;
1383 eeprom.register_read = rtl8187_eeprom_register_read; 1396 eeprom.register_read = rtl8187_eeprom_register_read;
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 0d7b1423f77b..f1cc90751dbf 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -35,6 +35,8 @@
35#define RFKILL_MASK_8187_89_97 0x2 35#define RFKILL_MASK_8187_89_97 0x2
36#define RFKILL_MASK_8198 0x4 36#define RFKILL_MASK_8198 0x4
37 37
38#define RETRY_COUNT 7
39
38struct rtl8187_rx_info { 40struct rtl8187_rx_info {
39 struct urb *urb; 41 struct urb *urb;
40 struct ieee80211_hw *dev; 42 struct ieee80211_hw *dev;
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 7f6573f7f470..ce49e0ce7cad 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,15 +1,33 @@
1config RTL8192CE 1config RTL8192CE
2 tristate "Realtek RTL8192CE/RTL8188SE Wireless Network Adapter" 2 tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
3 depends on MAC80211 && EXPERIMENTAL 3 depends on MAC80211 && PCI && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 select RTLWIFI 5 select RTLWIFI
6 select RTL8192C_COMMON
6 ---help--- 7 ---help---
7 This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe 8 This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
8 wireless network adapters. 9 wireless network adapters.
9 10
10 If you choose to build it as a module, it will be called rtl8192ce 11 If you choose to build it as a module, it will be called rtl8192ce
11 12
13config RTL8192CU
14 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
15 depends on MAC80211 && USB && EXPERIMENTAL
16 select FW_LOADER
17 select RTLWIFI
18 select RTL8192C_COMMON
19 ---help---
20 This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
21 wireless network adapters.
22
23 If you choose to build it as a module, it will be called rtl8192cu
24
12config RTLWIFI 25config RTLWIFI
13 tristate 26 tristate
14 depends on RTL8192CE 27 depends on RTL8192CE || RTL8192CU
28 default m
29
30config RTL8192C_COMMON
31 tristate
32 depends on RTL8192CE || RTL8192CU
15 default m 33 default m
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index 2a7a4384f8ee..ec9393f24799 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -5,9 +5,22 @@ rtlwifi-objs := \
5 core.o \ 5 core.o \
6 debug.o \ 6 debug.o \
7 efuse.o \ 7 efuse.o \
8 pci.o \
9 ps.o \ 8 ps.o \
10 rc.o \ 9 rc.o \
11 regd.o 10 regd.o
12 11
12rtl8192c_common-objs += \
13
14ifneq ($(CONFIG_PCI),)
15rtlwifi-objs += pci.o
16endif
17
18ifneq ($(CONFIG_USB),)
19rtlwifi-objs += usb.o
20endif
21
22obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
13obj-$(CONFIG_RTL8192CE) += rtl8192ce/ 23obj-$(CONFIG_RTL8192CE) += rtl8192ce/
24obj-$(CONFIG_RTL8192CU) += rtl8192cu/
25
26ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index cf0b73e51fc2..bb0c781f4a1b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -144,7 +144,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
144 ht_cap->mcs.rx_mask[1] = 0xFF; 144 ht_cap->mcs.rx_mask[1] = 0xFF;
145 ht_cap->mcs.rx_mask[4] = 0x01; 145 ht_cap->mcs.rx_mask[4] = 0x01;
146 146
147 ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15; 147 ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
148 } else if (get_rf_type(rtlphy) == RF_1T1R) { 148 } else if (get_rf_type(rtlphy) == RF_1T1R) {
149 149
150 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n")); 150 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n"));
@@ -153,7 +153,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
153 ht_cap->mcs.rx_mask[1] = 0x00; 153 ht_cap->mcs.rx_mask[1] = 0x00;
154 ht_cap->mcs.rx_mask[4] = 0x01; 154 ht_cap->mcs.rx_mask[4] = 0x01;
155 155
156 ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7; 156 ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
157 } 157 }
158} 158}
159 159
@@ -283,13 +283,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
283 rtlmac->hw = hw; 283 rtlmac->hw = hw;
284 284
285 /* <2> rate control register */ 285 /* <2> rate control register */
286 if (rtl_rate_control_register()) { 286 hw->rate_control_algorithm = "rtl_rc";
287 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
288 ("rtl: Unable to register rtl_rc,"
289 "use default RC !!\n"));
290 } else {
291 hw->rate_control_algorithm = "rtl_rc";
292 }
293 287
294 /* 288 /*
295 * <3> init CRDA must come after init 289 * <3> init CRDA must come after init
@@ -325,8 +319,6 @@ int rtl_init_core(struct ieee80211_hw *hw)
325 319
326void rtl_deinit_core(struct ieee80211_hw *hw) 320void rtl_deinit_core(struct ieee80211_hw *hw)
327{ 321{
328 /*RC*/
329 rtl_rate_control_unregister();
330} 322}
331 323
332void rtl_init_rx_config(struct ieee80211_hw *hw) 324void rtl_init_rx_config(struct ieee80211_hw *hw)
@@ -399,21 +391,21 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
399 u8 rate_flag = info->control.rates[0].flags; 391 u8 rate_flag = info->control.rates[0].flags;
400 392
401 /* Common Settings */ 393 /* Common Settings */
402 tcb_desc->b_rts_stbc = false; 394 tcb_desc->rts_stbc = false;
403 tcb_desc->b_cts_enable = false; 395 tcb_desc->cts_enable = false;
404 tcb_desc->rts_sc = 0; 396 tcb_desc->rts_sc = 0;
405 tcb_desc->b_rts_bw = false; 397 tcb_desc->rts_bw = false;
406 tcb_desc->b_rts_use_shortpreamble = false; 398 tcb_desc->rts_use_shortpreamble = false;
407 tcb_desc->b_rts_use_shortgi = false; 399 tcb_desc->rts_use_shortgi = false;
408 400
409 if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) { 401 if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
410 /* Use CTS-to-SELF in protection mode. */ 402 /* Use CTS-to-SELF in protection mode. */
411 tcb_desc->b_rts_enable = true; 403 tcb_desc->rts_enable = true;
412 tcb_desc->b_cts_enable = true; 404 tcb_desc->cts_enable = true;
413 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; 405 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
414 } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { 406 } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
415 /* Use RTS-CTS in protection mode. */ 407 /* Use RTS-CTS in protection mode. */
416 tcb_desc->b_rts_enable = true; 408 tcb_desc->rts_enable = true;
417 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; 409 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
418 } 410 }
419 411
@@ -429,7 +421,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
429 if (mac->opmode == NL80211_IFTYPE_STATION) 421 if (mac->opmode == NL80211_IFTYPE_STATION)
430 tcb_desc->ratr_index = 0; 422 tcb_desc->ratr_index = 0;
431 else if (mac->opmode == NL80211_IFTYPE_ADHOC) { 423 else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
432 if (tcb_desc->b_multicast || tcb_desc->b_broadcast) { 424 if (tcb_desc->multicast || tcb_desc->broadcast) {
433 tcb_desc->hw_rate = 425 tcb_desc->hw_rate =
434 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M]; 426 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
435 tcb_desc->use_driver_rate = 1; 427 tcb_desc->use_driver_rate = 1;
@@ -439,7 +431,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
439 } 431 }
440 } 432 }
441 433
442 if (rtlpriv->dm.b_useramask) { 434 if (rtlpriv->dm.useramask) {
443 /* TODO we will differentiate adhoc and station futrue */ 435 /* TODO we will differentiate adhoc and station futrue */
444 tcb_desc->mac_id = 0; 436 tcb_desc->mac_id = 0;
445 437
@@ -461,19 +453,19 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
461 struct rtl_priv *rtlpriv = rtl_priv(hw); 453 struct rtl_priv *rtlpriv = rtl_priv(hw);
462 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 454 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
463 455
464 tcb_desc->b_packet_bw = false; 456 tcb_desc->packet_bw = false;
465 457
466 if (!mac->bw_40 || !mac->ht_enable) 458 if (!mac->bw_40 || !mac->ht_enable)
467 return; 459 return;
468 460
469 if (tcb_desc->b_multicast || tcb_desc->b_broadcast) 461 if (tcb_desc->multicast || tcb_desc->broadcast)
470 return; 462 return;
471 463
472 /*use legency rate, shall use 20MHz */ 464 /*use legency rate, shall use 20MHz */
473 if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M]) 465 if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
474 return; 466 return;
475 467
476 tcb_desc->b_packet_bw = true; 468 tcb_desc->packet_bw = true;
477} 469}
478 470
479static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw) 471static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
@@ -498,7 +490,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
498 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); 490 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
499 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 491 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
500 struct ieee80211_rate *txrate; 492 struct ieee80211_rate *txrate;
501 u16 fc = le16_to_cpu(hdr->frame_control); 493 __le16 fc = hdr->frame_control;
502 494
503 memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 495 memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc));
504 496
@@ -545,9 +537,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
545 } 537 }
546 538
547 if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) 539 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
548 tcb_desc->b_multicast = 1; 540 tcb_desc->multicast = 1;
549 else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr))) 541 else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
550 tcb_desc->b_broadcast = 1; 542 tcb_desc->broadcast = 1;
551 543
552 _rtl_txrate_selectmode(hw, tcb_desc); 544 _rtl_txrate_selectmode(hw, tcb_desc);
553 _rtl_query_bandwidth_mode(hw, tcb_desc); 545 _rtl_query_bandwidth_mode(hw, tcb_desc);
@@ -570,7 +562,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
570 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 562 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
571 struct rtl_priv *rtlpriv = rtl_priv(hw); 563 struct rtl_priv *rtlpriv = rtl_priv(hw);
572 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 564 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
573 u16 fc = le16_to_cpu(hdr->frame_control); 565 __le16 fc = hdr->frame_control;
574 566
575 if (ieee80211_is_auth(fc)) { 567 if (ieee80211_is_auth(fc)) {
576 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); 568 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
@@ -587,7 +579,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
587 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 579 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
588 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 580 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
589 struct rtl_priv *rtlpriv = rtl_priv(hw); 581 struct rtl_priv *rtlpriv = rtl_priv(hw);
590 u16 fc = le16_to_cpu(hdr->frame_control); 582 __le16 fc = hdr->frame_control;
591 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN)); 583 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
592 u8 category; 584 u8 category;
593 585
@@ -632,7 +624,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
632 struct rtl_priv *rtlpriv = rtl_priv(hw); 624 struct rtl_priv *rtlpriv = rtl_priv(hw);
633 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 625 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
634 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 626 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
635 u16 fc = le16_to_cpu(hdr->frame_control); 627 __le16 fc = hdr->frame_control;
636 u16 ether_type; 628 u16 ether_type;
637 u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb); 629 u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
638 const struct iphdr *ip; 630 const struct iphdr *ip;
@@ -646,7 +638,6 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
646 ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + 638 ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
647 SNAP_SIZE + PROTOC_TYPE_SIZE); 639 SNAP_SIZE + PROTOC_TYPE_SIZE);
648 ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); 640 ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
649 ether_type = ntohs(ether_type);
650 641
651 if (ETH_P_IP == ether_type) { 642 if (ETH_P_IP == ether_type) {
652 if (IPPROTO_UDP == ip->protocol) { 643 if (IPPROTO_UDP == ip->protocol) {
@@ -690,7 +681,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
690 } 681 }
691 682
692 return true; 683 return true;
693 } else if (0x86DD == ether_type) { 684 } else if (ETH_P_IPV6 == ether_type) {
685 /* IPv6 */
694 return true; 686 return true;
695 } 687 }
696 688
@@ -777,10 +769,10 @@ void rtl_watchdog_wq_callback(void *data)
777 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 769 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
778 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 770 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
779 771
780 bool b_busytraffic = false; 772 bool busytraffic = false;
781 bool b_higher_busytraffic = false; 773 bool higher_busytraffic = false;
782 bool b_higher_busyrxtraffic = false; 774 bool higher_busyrxtraffic = false;
783 bool b_higher_busytxtraffic = false; 775 bool higher_busytxtraffic = false;
784 776
785 u8 idx = 0; 777 u8 idx = 0;
786 u32 rx_cnt_inp4eriod = 0; 778 u32 rx_cnt_inp4eriod = 0;
@@ -788,7 +780,7 @@ void rtl_watchdog_wq_callback(void *data)
788 u32 aver_rx_cnt_inperiod = 0; 780 u32 aver_rx_cnt_inperiod = 0;
789 u32 aver_tx_cnt_inperiod = 0; 781 u32 aver_tx_cnt_inperiod = 0;
790 782
791 bool benter_ps = false; 783 bool enter_ps = false;
792 784
793 if (is_hal_stop(rtlhal)) 785 if (is_hal_stop(rtlhal))
794 return; 786 return;
@@ -832,29 +824,29 @@ void rtl_watchdog_wq_callback(void *data)
832 824
833 /* (2) check traffic busy */ 825 /* (2) check traffic busy */
834 if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) 826 if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100)
835 b_busytraffic = true; 827 busytraffic = true;
836 828
837 /* Higher Tx/Rx data. */ 829 /* Higher Tx/Rx data. */
838 if (aver_rx_cnt_inperiod > 4000 || 830 if (aver_rx_cnt_inperiod > 4000 ||
839 aver_tx_cnt_inperiod > 4000) { 831 aver_tx_cnt_inperiod > 4000) {
840 b_higher_busytraffic = true; 832 higher_busytraffic = true;
841 833
842 /* Extremely high Rx data. */ 834 /* Extremely high Rx data. */
843 if (aver_rx_cnt_inperiod > 5000) 835 if (aver_rx_cnt_inperiod > 5000)
844 b_higher_busyrxtraffic = true; 836 higher_busyrxtraffic = true;
845 else 837 else
846 b_higher_busytxtraffic = false; 838 higher_busytxtraffic = false;
847 } 839 }
848 840
849 if (((rtlpriv->link_info.num_rx_inperiod + 841 if (((rtlpriv->link_info.num_rx_inperiod +
850 rtlpriv->link_info.num_tx_inperiod) > 8) || 842 rtlpriv->link_info.num_tx_inperiod) > 8) ||
851 (rtlpriv->link_info.num_rx_inperiod > 2)) 843 (rtlpriv->link_info.num_rx_inperiod > 2))
852 benter_ps = false; 844 enter_ps = false;
853 else 845 else
854 benter_ps = true; 846 enter_ps = true;
855 847
856 /* LeisurePS only work in infra mode. */ 848 /* LeisurePS only work in infra mode. */
857 if (benter_ps) 849 if (enter_ps)
858 rtl_lps_enter(hw); 850 rtl_lps_enter(hw);
859 else 851 else
860 rtl_lps_leave(hw); 852 rtl_lps_leave(hw);
@@ -863,9 +855,9 @@ void rtl_watchdog_wq_callback(void *data)
863 rtlpriv->link_info.num_rx_inperiod = 0; 855 rtlpriv->link_info.num_rx_inperiod = 0;
864 rtlpriv->link_info.num_tx_inperiod = 0; 856 rtlpriv->link_info.num_tx_inperiod = 0;
865 857
866 rtlpriv->link_info.b_busytraffic = b_busytraffic; 858 rtlpriv->link_info.busytraffic = busytraffic;
867 rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic; 859 rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
868 rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic; 860 rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
869 861
870} 862}
871 863
@@ -945,11 +937,16 @@ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
945 937
946static int __init rtl_core_module_init(void) 938static int __init rtl_core_module_init(void)
947{ 939{
940 if (rtl_rate_control_register())
941 printk(KERN_ERR "rtlwifi: Unable to register rtl_rc,"
942 "use default RC !!\n");
948 return 0; 943 return 0;
949} 944}
950 945
951static void __exit rtl_core_module_exit(void) 946static void __exit rtl_core_module_exit(void)
952{ 947{
948 /*RC*/
949 rtl_rate_control_unregister();
953} 950}
954 951
955module_init(rtl_core_module_init); 952module_init(rtl_core_module_init);
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 3de5a14745f1..043045342bc7 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -30,6 +30,7 @@
30#define __RTL_BASE_H__ 30#define __RTL_BASE_H__
31 31
32#define RTL_DUMMY_OFFSET 0 32#define RTL_DUMMY_OFFSET 0
33#define RTL_RX_DESC_SIZE 24
33#define RTL_DUMMY_UNIT 8 34#define RTL_DUMMY_UNIT 8
34#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT) 35#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
35#define RTL_TX_DESC_SIZE 32 36#define RTL_TX_DESC_SIZE 32
@@ -52,46 +53,22 @@
52#define FRAME_OFFSET_SEQUENCE 22 53#define FRAME_OFFSET_SEQUENCE 22
53#define FRAME_OFFSET_ADDRESS4 24 54#define FRAME_OFFSET_ADDRESS4 24
54 55
55#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \
56 WRITEEF2BYTE(_hdr, _val)
57#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
58 WRITEEF1BYTE(_hdr, _val)
59#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \
60 SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
61#define SET_80211_HDR_TO_DS(_hdr, _val) \
62 SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
63 56
64#define SET_80211_PS_POLL_AID(_hdr, _val) \ 57#define SET_80211_PS_POLL_AID(_hdr, _val) \
65 WRITEEF2BYTE(((u8 *)(_hdr)) + 2, _val) 58 (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
66#define SET_80211_PS_POLL_BSSID(_hdr, _val) \ 59#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
67 CP_MACADDR(((u8 *)(_hdr)) + 4, (u8 *)(_val)) 60 memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
68#define SET_80211_PS_POLL_TA(_hdr, _val) \ 61#define SET_80211_PS_POLL_TA(_hdr, _val) \
69 CP_MACADDR(((u8 *)(_hdr)) + 10, (u8 *)(_val)) 62 memcpy(((u8 *)(_hdr)) + 10, (u8 *)(_val), ETH_ALEN)
70 63
71#define SET_80211_HDR_DURATION(_hdr, _val) \ 64#define SET_80211_HDR_DURATION(_hdr, _val) \
72 WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_DURATION, _val) 65 (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val))
73#define SET_80211_HDR_ADDRESS1(_hdr, _val) \ 66#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
74 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val)) 67 memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val), ETH_ALEN)
75#define SET_80211_HDR_ADDRESS2(_hdr, _val) \ 68#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
76 CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val)) 69 memcpy((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val), ETH_ALEN)
77#define SET_80211_HDR_ADDRESS3(_hdr, _val) \ 70#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
78 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val)) 71 memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val), ETH_ALEN)
79#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
80 WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
81
82#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \
83 WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val)
84#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
85 WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val)
86#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
87 WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val)
88#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \
89 READEF2BYTE(((u8 *)(__phdr)) + 34)
90#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
91 WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val)
92#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
93 SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
94 (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
95 72
96int rtl_init_core(struct ieee80211_hw *hw); 73int rtl_init_core(struct ieee80211_hw *hw);
97void rtl_deinit_core(struct ieee80211_hw *hw); 74void rtl_deinit_core(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index d6a924a05654..e4f4aee8f298 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -82,7 +82,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
82 mutex_unlock(&rtlpriv->locks.conf_mutex); 82 mutex_unlock(&rtlpriv->locks.conf_mutex);
83} 83}
84 84
85static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 85static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
86{ 86{
87 struct rtl_priv *rtlpriv = rtl_priv(hw); 87 struct rtl_priv *rtlpriv = rtl_priv(hw);
88 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 88 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -97,11 +97,10 @@ static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
97 97
98 rtlpriv->intf_ops->adapter_tx(hw, skb); 98 rtlpriv->intf_ops->adapter_tx(hw, skb);
99 99
100 return NETDEV_TX_OK; 100 return;
101 101
102err_free: 102err_free:
103 dev_kfree_skb_any(skb); 103 dev_kfree_skb_any(skb);
104 return NETDEV_TX_OK;
105} 104}
106 105
107static int rtl_op_add_interface(struct ieee80211_hw *hw, 106static int rtl_op_add_interface(struct ieee80211_hw *hw,
@@ -434,9 +433,9 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
434 433
435 aci = _rtl_get_hal_qnum(queue); 434 aci = _rtl_get_hal_qnum(queue);
436 mac->ac[aci].aifs = param->aifs; 435 mac->ac[aci].aifs = param->aifs;
437 mac->ac[aci].cw_min = param->cw_min; 436 mac->ac[aci].cw_min = cpu_to_le16(param->cw_min);
438 mac->ac[aci].cw_max = param->cw_max; 437 mac->ac[aci].cw_max = cpu_to_le16(param->cw_max);
439 mac->ac[aci].tx_op = param->txop; 438 mac->ac[aci].tx_op = cpu_to_le16(param->txop);
440 memcpy(&mac->edca_param[aci], param, sizeof(*param)); 439 memcpy(&mac->edca_param[aci], param, sizeof(*param));
441 rtlpriv->cfg->ops->set_qos(hw, aci); 440 rtlpriv->cfg->ops->set_qos(hw, aci);
442 return 0; 441 return 0;
@@ -552,6 +551,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
552 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 551 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
553 ("BSS_CHANGED_HT\n")); 552 ("BSS_CHANGED_HT\n"));
554 553
554 rcu_read_lock();
555 sta = ieee80211_find_sta(mac->vif, mac->bssid); 555 sta = ieee80211_find_sta(mac->vif, mac->bssid);
556 556
557 if (sta) { 557 if (sta) {
@@ -564,6 +564,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
564 mac->current_ampdu_factor = 564 mac->current_ampdu_factor =
565 sta->ht_cap.ampdu_factor; 565 sta->ht_cap.ampdu_factor;
566 } 566 }
567 rcu_read_unlock();
567 568
568 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY, 569 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
569 (u8 *) (&mac->max_mss_density)); 570 (u8 *) (&mac->max_mss_density));
@@ -615,6 +616,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
615 else 616 else
616 mac->mode = WIRELESS_MODE_G; 617 mac->mode = WIRELESS_MODE_G;
617 618
619 rcu_read_lock();
618 sta = ieee80211_find_sta(mac->vif, mac->bssid); 620 sta = ieee80211_find_sta(mac->vif, mac->bssid);
619 621
620 if (sta) { 622 if (sta) {
@@ -649,6 +651,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
649 */ 651 */
650 } 652 }
651 } 653 }
654 rcu_read_unlock();
652 655
653 /*mac80211 just give us CCK rates any time 656 /*mac80211 just give us CCK rates any time
654 *So we add G rate in basic rates when 657 *So we add G rate in basic rates when
@@ -666,7 +669,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
666 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, 669 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
667 (u8 *) (&basic_rates)); 670 (u8 *) (&basic_rates));
668 671
669 if (rtlpriv->dm.b_useramask) 672 if (rtlpriv->dm.useramask)
670 rtlpriv->cfg->ops->update_rate_mask(hw, 0); 673 rtlpriv->cfg->ops->update_rate_mask(hw, 0);
671 else 674 else
672 rtlpriv->cfg->ops->update_rate_table(hw); 675 rtlpriv->cfg->ops->update_rate_table(hw);
@@ -681,7 +684,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
681 */ 684 */
682 if (changed & BSS_CHANGED_ASSOC) { 685 if (changed & BSS_CHANGED_ASSOC) {
683 if (bss_conf->assoc) { 686 if (bss_conf->assoc) {
684 if (ppsc->b_fwctrl_lps) { 687 if (ppsc->fwctrl_lps) {
685 u8 mstatus = RT_MEDIA_CONNECT; 688 u8 mstatus = RT_MEDIA_CONNECT;
686 rtlpriv->cfg->ops->set_hw_reg(hw, 689 rtlpriv->cfg->ops->set_hw_reg(hw,
687 HW_VAR_H2C_FW_JOINBSSRPT, 690 HW_VAR_H2C_FW_JOINBSSRPT,
@@ -689,7 +692,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
689 ppsc->report_linked = true; 692 ppsc->report_linked = true;
690 } 693 }
691 } else { 694 } else {
692 if (ppsc->b_fwctrl_lps) { 695 if (ppsc->fwctrl_lps) {
693 u8 mstatus = RT_MEDIA_DISCONNECT; 696 u8 mstatus = RT_MEDIA_DISCONNECT;
694 rtlpriv->cfg->ops->set_hw_reg(hw, 697 rtlpriv->cfg->ops->set_hw_reg(hw,
695 HW_VAR_H2C_FW_JOINBSSRPT, 698 HW_VAR_H2C_FW_JOINBSSRPT,
@@ -748,7 +751,8 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
748static int rtl_op_ampdu_action(struct ieee80211_hw *hw, 751static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
749 struct ieee80211_vif *vif, 752 struct ieee80211_vif *vif,
750 enum ieee80211_ampdu_mlme_action action, 753 enum ieee80211_ampdu_mlme_action action,
751 struct ieee80211_sta *sta, u16 tid, u16 * ssn) 754 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
755 u8 buf_size)
752{ 756{
753 struct rtl_priv *rtlpriv = rtl_priv(hw); 757 struct rtl_priv *rtlpriv = rtl_priv(hw);
754 758
@@ -817,7 +821,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
817 /* fix fwlps issue */ 821 /* fix fwlps issue */
818 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); 822 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
819 823
820 if (rtlpriv->dm.b_useramask) 824 if (rtlpriv->dm.useramask)
821 rtlpriv->cfg->ops->update_rate_mask(hw, 0); 825 rtlpriv->cfg->ops->update_rate_mask(hw, 0);
822 else 826 else
823 rtlpriv->cfg->ops->update_rate_table(hw); 827 rtlpriv->cfg->ops->update_rate_table(hw);
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
index 08bdec2ceda4..e4aa8687408c 100644
--- a/drivers/net/wireless/rtlwifi/debug.h
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -105,6 +105,7 @@
105#define COMP_MAC80211 BIT(26) 105#define COMP_MAC80211 BIT(26)
106#define COMP_REGD BIT(27) 106#define COMP_REGD BIT(27)
107#define COMP_CHAN BIT(28) 107#define COMP_CHAN BIT(28)
108#define COMP_USB BIT(29)
108 109
109/*-------------------------------------------------------------- 110/*--------------------------------------------------------------
110 Define the rt_print components 111 Define the rt_print components
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 62876cd5c41a..4f92cba6810a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1169,21 +1169,3 @@ static u8 efuse_calculate_word_cnts(u8 word_en)
1169 return word_cnts; 1169 return word_cnts;
1170} 1170}
1171 1171
1172void efuse_reset_loader(struct ieee80211_hw *hw)
1173{
1174 struct rtl_priv *rtlpriv = rtl_priv(hw);
1175 u16 tmp_u2b;
1176
1177 tmp_u2b = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
1178 rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
1179 (tmp_u2b & ~(BIT(12))));
1180 udelay(10000);
1181 rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
1182 (tmp_u2b | BIT(12)));
1183 udelay(10000);
1184}
1185
1186bool efuse_program_map(struct ieee80211_hw *hw, char *p_filename, u8 tabletype)
1187{
1188 return true;
1189}
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 2d39a4df181b..47774dd4c2a6 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -117,8 +117,5 @@ extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
117extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); 117extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
118extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); 118extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
119extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); 119extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
120extern bool efuse_program_map(struct ieee80211_hw *hw,
121 char *p_filename, u8 tabletype);
122extern void efuse_reset_loader(struct ieee80211_hw *hw);
123 120
124#endif 121#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 1758d4463247..9cd7703c2a30 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -50,7 +50,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
50 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; 50 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
51 51
52 ppsc->reg_rfps_level = 0; 52 ppsc->reg_rfps_level = 0;
53 ppsc->b_support_aspm = 0; 53 ppsc->support_aspm = 0;
54 54
55 /*Update PCI ASPM setting */ 55 /*Update PCI ASPM setting */
56 ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm; 56 ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -115,29 +115,29 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
115 switch (rtlpci->const_support_pciaspm) { 115 switch (rtlpci->const_support_pciaspm) {
116 case 0:{ 116 case 0:{
117 /*Not support ASPM. */ 117 /*Not support ASPM. */
118 bool b_support_aspm = false; 118 bool support_aspm = false;
119 ppsc->b_support_aspm = b_support_aspm; 119 ppsc->support_aspm = support_aspm;
120 break; 120 break;
121 } 121 }
122 case 1:{ 122 case 1:{
123 /*Support ASPM. */ 123 /*Support ASPM. */
124 bool b_support_aspm = true; 124 bool support_aspm = true;
125 bool b_support_backdoor = true; 125 bool support_backdoor = true;
126 ppsc->b_support_aspm = b_support_aspm; 126 ppsc->support_aspm = support_aspm;
127 127
128 /*if(priv->oem_id == RT_CID_TOSHIBA && 128 /*if(priv->oem_id == RT_CID_TOSHIBA &&
129 !priv->ndis_adapter.amd_l1_patch) 129 !priv->ndis_adapter.amd_l1_patch)
130 b_support_backdoor = false; */ 130 support_backdoor = false; */
131 131
132 ppsc->b_support_backdoor = b_support_backdoor; 132 ppsc->support_backdoor = support_backdoor;
133 133
134 break; 134 break;
135 } 135 }
136 case 2: 136 case 2:
137 /*ASPM value set by chipset. */ 137 /*ASPM value set by chipset. */
138 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) { 138 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
139 bool b_support_aspm = true; 139 bool support_aspm = true;
140 ppsc->b_support_aspm = b_support_aspm; 140 ppsc->support_aspm = support_aspm;
141 } 141 }
142 break; 142 break;
143 default: 143 default:
@@ -476,9 +476,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
476 476
477 skb = __skb_dequeue(&ring->queue); 477 skb = __skb_dequeue(&ring->queue);
478 pci_unmap_single(rtlpci->pdev, 478 pci_unmap_single(rtlpci->pdev,
479 le32_to_cpu(rtlpriv->cfg->ops-> 479 rtlpriv->cfg->ops->
480 get_desc((u8 *) entry, true, 480 get_desc((u8 *) entry, true,
481 HW_DESC_TXBUFF_ADDR)), 481 HW_DESC_TXBUFF_ADDR),
482 skb->len, PCI_DMA_TODEVICE); 482 skb->len, PCI_DMA_TODEVICE);
483 483
484 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE, 484 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
@@ -557,7 +557,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
557 return; 557 return;
558 } else { 558 } else {
559 struct ieee80211_hdr *hdr; 559 struct ieee80211_hdr *hdr;
560 u16 fc; 560 __le16 fc;
561 struct sk_buff *new_skb = NULL; 561 struct sk_buff *new_skb = NULL;
562 562
563 rtlpriv->cfg->ops->query_rx_desc(hw, &stats, 563 rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
@@ -583,9 +583,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
583 */ 583 */
584 584
585 hdr = (struct ieee80211_hdr *)(skb->data); 585 hdr = (struct ieee80211_hdr *)(skb->data);
586 fc = le16_to_cpu(hdr->frame_control); 586 fc = hdr->frame_control;
587 587
588 if (!stats.b_crc) { 588 if (!stats.crc) {
589 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, 589 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
590 sizeof(rx_status)); 590 sizeof(rx_status));
591 591
@@ -666,7 +666,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
666 666
667 } 667 }
668done: 668done:
669 bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb)); 669 bufferaddress = (u32)(*((dma_addr_t *) skb->cb));
670 tmp_one = 1; 670 tmp_one = 1;
671 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false, 671 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
672 HW_DESC_RXBUFF_ADDR, 672 HW_DESC_RXBUFF_ADDR,
@@ -690,75 +690,6 @@ done:
690 690
691} 691}
692 692
693void _rtl_pci_tx_interrupt(struct ieee80211_hw *hw)
694{
695 struct rtl_priv *rtlpriv = rtl_priv(hw);
696 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
697 int prio;
698
699 for (prio = 0; prio < RTL_PCI_MAX_TX_QUEUE_COUNT; prio++) {
700 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
701
702 while (skb_queue_len(&ring->queue)) {
703 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
704 struct sk_buff *skb;
705 struct ieee80211_tx_info *info;
706 u8 own;
707
708 /*
709 *beacon packet will only use the first
710 *descriptor defautly, and the own may not
711 *be cleared by the hardware, and
712 *beacon will free in prepare beacon
713 */
714 if (prio == BEACON_QUEUE || prio == TXCMD_QUEUE ||
715 prio == HCCA_QUEUE)
716 break;
717
718 own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)entry,
719 true,
720 HW_DESC_OWN);
721
722 if (own)
723 break;
724
725 skb = __skb_dequeue(&ring->queue);
726 pci_unmap_single(rtlpci->pdev,
727 le32_to_cpu(rtlpriv->cfg->ops->
728 get_desc((u8 *) entry,
729 true,
730 HW_DESC_TXBUFF_ADDR)),
731 skb->len, PCI_DMA_TODEVICE);
732
733 ring->idx = (ring->idx + 1) % ring->entries;
734
735 info = IEEE80211_SKB_CB(skb);
736 ieee80211_tx_info_clear_status(info);
737
738 info->flags |= IEEE80211_TX_STAT_ACK;
739 /*info->status.rates[0].count = 1; */
740
741 ieee80211_tx_status_irqsafe(hw, skb);
742
743 if ((ring->entries - skb_queue_len(&ring->queue))
744 == 2 && prio != BEACON_QUEUE) {
745 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
746 ("more desc left, wake "
747 "skb_queue@%d,ring->idx = %d,"
748 "skb_queue_len = 0x%d\n",
749 prio, ring->idx,
750 skb_queue_len(&ring->queue)));
751
752 ieee80211_wake_queue(hw,
753 skb_get_queue_mapping
754 (skb));
755 }
756
757 skb = NULL;
758 }
759 }
760}
761
762static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) 693static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
763{ 694{
764 struct ieee80211_hw *hw = dev_id; 695 struct ieee80211_hw *hw = dev_id;
@@ -959,17 +890,17 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
959 rtlhal->hw = hw; 890 rtlhal->hw = hw;
960 rtlpci->pdev = pdev; 891 rtlpci->pdev = pdev;
961 892
962 ppsc->b_inactiveps = false; 893 ppsc->inactiveps = false;
963 ppsc->b_leisure_ps = true; 894 ppsc->leisure_ps = true;
964 ppsc->b_fwctrl_lps = true; 895 ppsc->fwctrl_lps = true;
965 ppsc->b_reg_fwctrl_lps = 3; 896 ppsc->reg_fwctrl_lps = 3;
966 ppsc->reg_max_lps_awakeintvl = 5; 897 ppsc->reg_max_lps_awakeintvl = 5;
967 898
968 if (ppsc->b_reg_fwctrl_lps == 1) 899 if (ppsc->reg_fwctrl_lps == 1)
969 ppsc->fwctrl_psmode = FW_PS_MIN_MODE; 900 ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
970 else if (ppsc->b_reg_fwctrl_lps == 2) 901 else if (ppsc->reg_fwctrl_lps == 2)
971 ppsc->fwctrl_psmode = FW_PS_MAX_MODE; 902 ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
972 else if (ppsc->b_reg_fwctrl_lps == 3) 903 else if (ppsc->reg_fwctrl_lps == 3)
973 ppsc->fwctrl_psmode = FW_PS_DTIM_MODE; 904 ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
974 905
975 /*Tx/Rx related var */ 906 /*Tx/Rx related var */
@@ -1024,9 +955,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1024 ("queue:%d, ring_addr:%p\n", prio, ring)); 955 ("queue:%d, ring_addr:%p\n", prio, ring));
1025 956
1026 for (i = 0; i < entries; i++) { 957 for (i = 0; i < entries; i++) {
1027 nextdescaddress = cpu_to_le32((u32) dma + 958 nextdescaddress = (u32) dma + ((i + 1) % entries) *
1028 ((i + 1) % entries) * 959 sizeof(*ring);
1029 sizeof(*ring));
1030 960
1031 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]), 961 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
1032 true, HW_DESC_TX_NEXTDESC_ADDR, 962 true, HW_DESC_TX_NEXTDESC_ADDR,
@@ -1090,7 +1020,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1090 rtlpci->rxbuffersize, 1020 rtlpci->rxbuffersize,
1091 PCI_DMA_FROMDEVICE); 1021 PCI_DMA_FROMDEVICE);
1092 1022
1093 bufferaddress = cpu_to_le32(*((dma_addr_t *)skb->cb)); 1023 bufferaddress = (u32)(*((dma_addr_t *)skb->cb));
1094 rtlpriv->cfg->ops->set_desc((u8 *)entry, false, 1024 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1095 HW_DESC_RXBUFF_ADDR, 1025 HW_DESC_RXBUFF_ADDR,
1096 (u8 *)&bufferaddress); 1026 (u8 *)&bufferaddress);
@@ -1121,9 +1051,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1121 struct sk_buff *skb = __skb_dequeue(&ring->queue); 1051 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1122 1052
1123 pci_unmap_single(rtlpci->pdev, 1053 pci_unmap_single(rtlpci->pdev,
1124 le32_to_cpu(rtlpriv->cfg-> 1054 rtlpriv->cfg->
1125 ops->get_desc((u8 *) entry, true, 1055 ops->get_desc((u8 *) entry, true,
1126 HW_DESC_TXBUFF_ADDR)), 1056 HW_DESC_TXBUFF_ADDR),
1127 skb->len, PCI_DMA_TODEVICE); 1057 skb->len, PCI_DMA_TODEVICE);
1128 kfree_skb(skb); 1058 kfree_skb(skb);
1129 ring->idx = (ring->idx + 1) % ring->entries; 1059 ring->idx = (ring->idx + 1) % ring->entries;
@@ -1255,11 +1185,11 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1255 __skb_dequeue(&ring->queue); 1185 __skb_dequeue(&ring->queue);
1256 1186
1257 pci_unmap_single(rtlpci->pdev, 1187 pci_unmap_single(rtlpci->pdev,
1258 le32_to_cpu(rtlpriv->cfg->ops-> 1188 rtlpriv->cfg->ops->
1259 get_desc((u8 *) 1189 get_desc((u8 *)
1260 entry, 1190 entry,
1261 true, 1191 true,
1262 HW_DESC_TXBUFF_ADDR)), 1192 HW_DESC_TXBUFF_ADDR),
1263 skb->len, PCI_DMA_TODEVICE); 1193 skb->len, PCI_DMA_TODEVICE);
1264 kfree_skb(skb); 1194 kfree_skb(skb);
1265 ring->idx = (ring->idx + 1) % ring->entries; 1195 ring->idx = (ring->idx + 1) % ring->entries;
@@ -1273,7 +1203,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1273 return 0; 1203 return 0;
1274} 1204}
1275 1205
1276unsigned int _rtl_mac_to_hwqueue(u16 fc, 1206static unsigned int _rtl_mac_to_hwqueue(__le16 fc,
1277 unsigned int mac80211_queue_index) 1207 unsigned int mac80211_queue_index)
1278{ 1208{
1279 unsigned int hw_queue_index; 1209 unsigned int hw_queue_index;
@@ -1312,7 +1242,7 @@ out:
1312 return hw_queue_index; 1242 return hw_queue_index;
1313} 1243}
1314 1244
1315int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1245static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1316{ 1246{
1317 struct rtl_priv *rtlpriv = rtl_priv(hw); 1247 struct rtl_priv *rtlpriv = rtl_priv(hw);
1318 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1248 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1323,7 +1253,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1323 unsigned int queue_index, hw_queue; 1253 unsigned int queue_index, hw_queue;
1324 unsigned long flags; 1254 unsigned long flags;
1325 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 1255 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
1326 u16 fc = le16_to_cpu(hdr->frame_control); 1256 __le16 fc = hdr->frame_control;
1327 u8 *pda_addr = hdr->addr1; 1257 u8 *pda_addr = hdr->addr1;
1328 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1258 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1329 /*ssn */ 1259 /*ssn */
@@ -1429,7 +1359,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1429 return 0; 1359 return 0;
1430} 1360}
1431 1361
1432void rtl_pci_deinit(struct ieee80211_hw *hw) 1362static void rtl_pci_deinit(struct ieee80211_hw *hw)
1433{ 1363{
1434 struct rtl_priv *rtlpriv = rtl_priv(hw); 1364 struct rtl_priv *rtlpriv = rtl_priv(hw);
1435 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1365 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1444,7 +1374,7 @@ void rtl_pci_deinit(struct ieee80211_hw *hw)
1444 1374
1445} 1375}
1446 1376
1447int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) 1377static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1448{ 1378{
1449 struct rtl_priv *rtlpriv = rtl_priv(hw); 1379 struct rtl_priv *rtlpriv = rtl_priv(hw);
1450 int err; 1380 int err;
@@ -1461,7 +1391,7 @@ int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1461 return 1; 1391 return 1;
1462} 1392}
1463 1393
1464int rtl_pci_start(struct ieee80211_hw *hw) 1394static int rtl_pci_start(struct ieee80211_hw *hw)
1465{ 1395{
1466 struct rtl_priv *rtlpriv = rtl_priv(hw); 1396 struct rtl_priv *rtlpriv = rtl_priv(hw);
1467 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1397 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1496,7 +1426,7 @@ int rtl_pci_start(struct ieee80211_hw *hw)
1496 return 0; 1426 return 0;
1497} 1427}
1498 1428
1499void rtl_pci_stop(struct ieee80211_hw *hw) 1429static void rtl_pci_stop(struct ieee80211_hw *hw)
1500{ 1430{
1501 struct rtl_priv *rtlpriv = rtl_priv(hw); 1431 struct rtl_priv *rtlpriv = rtl_priv(hw);
1502 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1432 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1547,13 +1477,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1547 struct pci_dev *bridge_pdev = pdev->bus->self; 1477 struct pci_dev *bridge_pdev = pdev->bus->self;
1548 u16 venderid; 1478 u16 venderid;
1549 u16 deviceid; 1479 u16 deviceid;
1550 u8 revisionid;
1551 u16 irqline; 1480 u16 irqline;
1552 u8 tmp; 1481 u8 tmp;
1553 1482
1554 venderid = pdev->vendor; 1483 venderid = pdev->vendor;
1555 deviceid = pdev->device; 1484 deviceid = pdev->device;
1556 pci_read_config_byte(pdev, 0x8, &revisionid);
1557 pci_read_config_word(pdev, 0x3C, &irqline); 1485 pci_read_config_word(pdev, 0x3C, &irqline);
1558 1486
1559 if (deviceid == RTL_PCI_8192_DID || 1487 if (deviceid == RTL_PCI_8192_DID ||
@@ -1564,7 +1492,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1564 deviceid == RTL_PCI_8173_DID || 1492 deviceid == RTL_PCI_8173_DID ||
1565 deviceid == RTL_PCI_8172_DID || 1493 deviceid == RTL_PCI_8172_DID ||
1566 deviceid == RTL_PCI_8171_DID) { 1494 deviceid == RTL_PCI_8171_DID) {
1567 switch (revisionid) { 1495 switch (pdev->revision) {
1568 case RTL_PCI_REVISION_ID_8192PCIE: 1496 case RTL_PCI_REVISION_ID_8192PCIE:
1569 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1497 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1570 ("8192 PCI-E is found - " 1498 ("8192 PCI-E is found - "
@@ -1838,7 +1766,7 @@ fail3:
1838 ieee80211_free_hw(hw); 1766 ieee80211_free_hw(hw);
1839 1767
1840 if (rtlpriv->io.pci_mem_start != 0) 1768 if (rtlpriv->io.pci_mem_start != 0)
1841 pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start); 1769 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1842 1770
1843fail2: 1771fail2:
1844 pci_release_regions(pdev); 1772 pci_release_regions(pdev);
@@ -1888,7 +1816,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
1888 } 1816 }
1889 1817
1890 if (rtlpriv->io.pci_mem_start != 0) { 1818 if (rtlpriv->io.pci_mem_start != 0) {
1891 pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start); 1819 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1892 pci_release_regions(pdev); 1820 pci_release_regions(pdev);
1893 } 1821 }
1894 1822
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index d36a66939958..0caa81429726 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -244,34 +244,34 @@ int rtl_pci_resume(struct pci_dev *pdev);
244 244
245static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr) 245static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
246{ 246{
247 return 0xff & readb((u8 *) rtlpriv->io.pci_mem_start + addr); 247 return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
248} 248}
249 249
250static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr) 250static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
251{ 251{
252 return readw((u8 *) rtlpriv->io.pci_mem_start + addr); 252 return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
253} 253}
254 254
255static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr) 255static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
256{ 256{
257 return readl((u8 *) rtlpriv->io.pci_mem_start + addr); 257 return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
258} 258}
259 259
260static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val) 260static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
261{ 261{
262 writeb(val, (u8 *) rtlpriv->io.pci_mem_start + addr); 262 writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
263} 263}
264 264
265static inline void pci_write16_async(struct rtl_priv *rtlpriv, 265static inline void pci_write16_async(struct rtl_priv *rtlpriv,
266 u32 addr, u16 val) 266 u32 addr, u16 val)
267{ 267{
268 writew(val, (u8 *) rtlpriv->io.pci_mem_start + addr); 268 writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
269} 269}
270 270
271static inline void pci_write32_async(struct rtl_priv *rtlpriv, 271static inline void pci_write32_async(struct rtl_priv *rtlpriv,
272 u32 addr, u32 val) 272 u32 addr, u32 val)
273{ 273{
274 writel(val, (u8 *) rtlpriv->io.pci_mem_start + addr); 274 writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
275} 275}
276 276
277static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val) 277static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index d2326c13449e..6b7e217b6b89 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -86,7 +86,7 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
86 struct rtl_priv *rtlpriv = rtl_priv(hw); 86 struct rtl_priv *rtlpriv = rtl_priv(hw);
87 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 87 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
88 enum rf_pwrstate rtstate; 88 enum rf_pwrstate rtstate;
89 bool b_actionallowed = false; 89 bool actionallowed = false;
90 u16 rfwait_cnt = 0; 90 u16 rfwait_cnt = 0;
91 unsigned long flag; 91 unsigned long flag;
92 92
@@ -139,13 +139,13 @@ no_protect:
139 ppsc->rfoff_reason &= (~changesource); 139 ppsc->rfoff_reason &= (~changesource);
140 140
141 if ((changesource == RF_CHANGE_BY_HW) && 141 if ((changesource == RF_CHANGE_BY_HW) &&
142 (ppsc->b_hwradiooff == true)) { 142 (ppsc->hwradiooff == true)) {
143 ppsc->b_hwradiooff = false; 143 ppsc->hwradiooff = false;
144 } 144 }
145 145
146 if (!ppsc->rfoff_reason) { 146 if (!ppsc->rfoff_reason) {
147 ppsc->rfoff_reason = 0; 147 ppsc->rfoff_reason = 0;
148 b_actionallowed = true; 148 actionallowed = true;
149 } 149 }
150 150
151 break; 151 break;
@@ -153,17 +153,17 @@ no_protect:
153 case ERFOFF: 153 case ERFOFF:
154 154
155 if ((changesource == RF_CHANGE_BY_HW) 155 if ((changesource == RF_CHANGE_BY_HW)
156 && (ppsc->b_hwradiooff == false)) { 156 && (ppsc->hwradiooff == false)) {
157 ppsc->b_hwradiooff = true; 157 ppsc->hwradiooff = true;
158 } 158 }
159 159
160 ppsc->rfoff_reason |= changesource; 160 ppsc->rfoff_reason |= changesource;
161 b_actionallowed = true; 161 actionallowed = true;
162 break; 162 break;
163 163
164 case ERFSLEEP: 164 case ERFSLEEP:
165 ppsc->rfoff_reason |= changesource; 165 ppsc->rfoff_reason |= changesource;
166 b_actionallowed = true; 166 actionallowed = true;
167 break; 167 break;
168 168
169 default: 169 default:
@@ -172,7 +172,7 @@ no_protect:
172 break; 172 break;
173 } 173 }
174 174
175 if (b_actionallowed) 175 if (actionallowed)
176 rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset); 176 rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
177 177
178 if (!protect_or_not) { 178 if (!protect_or_not) {
@@ -181,7 +181,7 @@ no_protect:
181 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); 181 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
182 } 182 }
183 183
184 return b_actionallowed; 184 return actionallowed;
185} 185}
186EXPORT_SYMBOL(rtl_ps_set_rf_state); 186EXPORT_SYMBOL(rtl_ps_set_rf_state);
187 187
@@ -191,7 +191,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
192 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 192 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
193 193
194 ppsc->b_swrf_processing = true; 194 ppsc->swrf_processing = true;
195 195
196 if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) { 196 if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
197 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && 197 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
@@ -213,7 +213,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
213 } 213 }
214 } 214 }
215 215
216 ppsc->b_swrf_processing = false; 216 ppsc->swrf_processing = false;
217} 217}
218 218
219void rtl_ips_nic_off_wq_callback(void *data) 219void rtl_ips_nic_off_wq_callback(void *data)
@@ -239,13 +239,13 @@ void rtl_ips_nic_off_wq_callback(void *data)
239 if (rtlpriv->sec.being_setkey) 239 if (rtlpriv->sec.being_setkey)
240 return; 240 return;
241 241
242 if (ppsc->b_inactiveps) { 242 if (ppsc->inactiveps) {
243 rtstate = ppsc->rfpwr_state; 243 rtstate = ppsc->rfpwr_state;
244 244
245 /* 245 /*
246 *Do not enter IPS in the following conditions: 246 *Do not enter IPS in the following conditions:
247 *(1) RF is already OFF or Sleep 247 *(1) RF is already OFF or Sleep
248 *(2) b_swrf_processing (indicates the IPS is still under going) 248 *(2) swrf_processing (indicates the IPS is still under going)
249 *(3) Connectted (only disconnected can trigger IPS) 249 *(3) Connectted (only disconnected can trigger IPS)
250 *(4) IBSS (send Beacon) 250 *(4) IBSS (send Beacon)
251 *(5) AP mode (send Beacon) 251 *(5) AP mode (send Beacon)
@@ -253,14 +253,14 @@ void rtl_ips_nic_off_wq_callback(void *data)
253 */ 253 */
254 254
255 if (rtstate == ERFON && 255 if (rtstate == ERFON &&
256 !ppsc->b_swrf_processing && 256 !ppsc->swrf_processing &&
257 (mac->link_state == MAC80211_NOLINK) && 257 (mac->link_state == MAC80211_NOLINK) &&
258 !mac->act_scanning) { 258 !mac->act_scanning) {
259 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, 259 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
260 ("IPSEnter(): Turn off RF.\n")); 260 ("IPSEnter(): Turn off RF.\n"));
261 261
262 ppsc->inactive_pwrstate = ERFOFF; 262 ppsc->inactive_pwrstate = ERFOFF;
263 ppsc->b_in_powersavemode = true; 263 ppsc->in_powersavemode = true;
264 264
265 /*rtl_pci_reset_trx_ring(hw); */ 265 /*rtl_pci_reset_trx_ring(hw); */
266 _rtl_ps_inactive_ps(hw); 266 _rtl_ps_inactive_ps(hw);
@@ -290,15 +290,15 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
290 290
291 spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags); 291 spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
292 292
293 if (ppsc->b_inactiveps) { 293 if (ppsc->inactiveps) {
294 rtstate = ppsc->rfpwr_state; 294 rtstate = ppsc->rfpwr_state;
295 295
296 if (rtstate != ERFON && 296 if (rtstate != ERFON &&
297 !ppsc->b_swrf_processing && 297 !ppsc->swrf_processing &&
298 ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) { 298 ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
299 299
300 ppsc->inactive_pwrstate = ERFON; 300 ppsc->inactive_pwrstate = ERFON;
301 ppsc->b_in_powersavemode = false; 301 ppsc->in_powersavemode = false;
302 302
303 _rtl_ps_inactive_ps(hw); 303 _rtl_ps_inactive_ps(hw);
304 } 304 }
@@ -370,9 +370,9 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
370 * mode and set RPWM to turn RF on. 370 * mode and set RPWM to turn RF on.
371 */ 371 */
372 372
373 if ((ppsc->b_fwctrl_lps) && (ppsc->b_leisure_ps) && 373 if ((ppsc->fwctrl_lps) && (ppsc->leisure_ps) &&
374 ppsc->report_linked) { 374 ppsc->report_linked) {
375 bool b_fw_current_inps; 375 bool fw_current_inps;
376 if (ppsc->dot11_psmode == EACTIVE) { 376 if (ppsc->dot11_psmode == EACTIVE) {
377 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 377 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
378 ("FW LPS leave ps_mode:%x\n", 378 ("FW LPS leave ps_mode:%x\n",
@@ -385,11 +385,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
385 rtlpriv->cfg->ops->set_hw_reg(hw, 385 rtlpriv->cfg->ops->set_hw_reg(hw,
386 HW_VAR_H2C_FW_PWRMODE, 386 HW_VAR_H2C_FW_PWRMODE,
387 (u8 *) (&fw_pwrmode)); 387 (u8 *) (&fw_pwrmode));
388 b_fw_current_inps = false; 388 fw_current_inps = false;
389 389
390 rtlpriv->cfg->ops->set_hw_reg(hw, 390 rtlpriv->cfg->ops->set_hw_reg(hw,
391 HW_VAR_FW_PSMODE_STATUS, 391 HW_VAR_FW_PSMODE_STATUS,
392 (u8 *) (&b_fw_current_inps)); 392 (u8 *) (&fw_current_inps));
393 393
394 } else { 394 } else {
395 if (rtl_get_fwlps_doze(hw)) { 395 if (rtl_get_fwlps_doze(hw)) {
@@ -398,10 +398,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
398 ppsc->fwctrl_psmode)); 398 ppsc->fwctrl_psmode));
399 399
400 rpwm_val = 0x02; /* RF off */ 400 rpwm_val = 0x02; /* RF off */
401 b_fw_current_inps = true; 401 fw_current_inps = true;
402 rtlpriv->cfg->ops->set_hw_reg(hw, 402 rtlpriv->cfg->ops->set_hw_reg(hw,
403 HW_VAR_FW_PSMODE_STATUS, 403 HW_VAR_FW_PSMODE_STATUS,
404 (u8 *) (&b_fw_current_inps)); 404 (u8 *) (&fw_current_inps));
405 rtlpriv->cfg->ops->set_hw_reg(hw, 405 rtlpriv->cfg->ops->set_hw_reg(hw,
406 HW_VAR_H2C_FW_PWRMODE, 406 HW_VAR_H2C_FW_PWRMODE,
407 (u8 *) (&ppsc->fwctrl_psmode)); 407 (u8 *) (&ppsc->fwctrl_psmode));
@@ -425,13 +425,13 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
425 struct rtl_priv *rtlpriv = rtl_priv(hw); 425 struct rtl_priv *rtlpriv = rtl_priv(hw);
426 unsigned long flag; 426 unsigned long flag;
427 427
428 if (!(ppsc->b_fwctrl_lps && ppsc->b_leisure_ps)) 428 if (!(ppsc->fwctrl_lps && ppsc->leisure_ps))
429 return; 429 return;
430 430
431 if (rtlpriv->sec.being_setkey) 431 if (rtlpriv->sec.being_setkey)
432 return; 432 return;
433 433
434 if (rtlpriv->link_info.b_busytraffic) 434 if (rtlpriv->link_info.busytraffic)
435 return; 435 return;
436 436
437 /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */ 437 /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */
@@ -446,7 +446,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
446 446
447 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); 447 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
448 448
449 if (ppsc->b_leisure_ps) { 449 if (ppsc->leisure_ps) {
450 /* Idle for a while if we connect to AP a while ago. */ 450 /* Idle for a while if we connect to AP a while ago. */
451 if (mac->cnt_after_linked >= 2) { 451 if (mac->cnt_after_linked >= 2) {
452 if (ppsc->dot11_psmode == EACTIVE) { 452 if (ppsc->dot11_psmode == EACTIVE) {
@@ -470,7 +470,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
470 470
471 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); 471 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
472 472
473 if (ppsc->b_fwctrl_lps && ppsc->b_leisure_ps) { 473 if (ppsc->fwctrl_lps && ppsc->leisure_ps) {
474 if (ppsc->dot11_psmode != EACTIVE) { 474 if (ppsc->dot11_psmode != EACTIVE) {
475 475
476 /*FIX ME */ 476 /*FIX ME */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
new file mode 100644
index 000000000000..aee42d7ae8a2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
@@ -0,0 +1,9 @@
1rtl8192c-common-objs := \
2 main.o \
3 dm_common.o \
4 fw_common.o \
5 phy_common.o
6
7obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
8
9ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
new file mode 100644
index 000000000000..bb023274414c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -0,0 +1,1398 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "dm_common.h"
31
32struct dig_t dm_digtable;
33static struct ps_t dm_pstable;
34
35static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
36 0x7f8001fe,
37 0x788001e2,
38 0x71c001c7,
39 0x6b8001ae,
40 0x65400195,
41 0x5fc0017f,
42 0x5a400169,
43 0x55400155,
44 0x50800142,
45 0x4c000130,
46 0x47c0011f,
47 0x43c0010f,
48 0x40000100,
49 0x3c8000f2,
50 0x390000e4,
51 0x35c000d7,
52 0x32c000cb,
53 0x300000c0,
54 0x2d4000b5,
55 0x2ac000ab,
56 0x288000a2,
57 0x26000098,
58 0x24000090,
59 0x22000088,
60 0x20000080,
61 0x1e400079,
62 0x1c800072,
63 0x1b00006c,
64 0x19800066,
65 0x18000060,
66 0x16c0005b,
67 0x15800056,
68 0x14400051,
69 0x1300004c,
70 0x12000048,
71 0x11000044,
72 0x10000040,
73};
74
75static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
76 {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
77 {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
78 {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
79 {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
80 {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
81 {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
82 {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
83 {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
84 {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
85 {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
86 {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
87 {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
88 {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
89 {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
90 {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
91 {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
92 {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
93 {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
94 {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
95 {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
96 {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
97 {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
98 {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
99 {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
100 {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
101 {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
102 {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
103 {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
104 {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
105 {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
106 {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
107 {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
108 {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
109};
110
111static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
112 {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
113 {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
114 {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
115 {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
116 {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
117 {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
118 {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
119 {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
120 {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
121 {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
122 {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
123 {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
124 {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
125 {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
126 {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
127 {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
128 {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
129 {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
130 {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
131 {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
132 {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
133 {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
134 {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
135 {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
136 {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
137 {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
138 {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
139 {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
140 {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
141 {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
142 {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
143 {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
144 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
145};
146
147static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
148{
149 dm_digtable.dig_enable_flag = true;
150 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
151 dm_digtable.cur_igvalue = 0x20;
152 dm_digtable.pre_igvalue = 0x0;
153 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
154 dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
155 dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
156 dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
157 dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
158 dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
159 dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
160 dm_digtable.rx_gain_range_max = DM_DIG_MAX;
161 dm_digtable.rx_gain_range_min = DM_DIG_MIN;
162 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
163 dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
164 dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
165 dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
166 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
167}
168
169static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
170{
171 struct rtl_priv *rtlpriv = rtl_priv(hw);
172 long rssi_val_min = 0;
173
174 if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
175 (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
176 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
177 rssi_val_min =
178 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
179 rtlpriv->dm.undecorated_smoothed_pwdb) ?
180 rtlpriv->dm.undecorated_smoothed_pwdb :
181 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
182 else
183 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
184 } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
185 dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
186 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
187 } else if (dm_digtable.curmultista_connectstate ==
188 DIG_MULTISTA_CONNECT) {
189 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
190 }
191
192 return (u8) rssi_val_min;
193}
194
195static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
196{
197 u32 ret_value;
198 struct rtl_priv *rtlpriv = rtl_priv(hw);
199 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
200
201 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
202 falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
203
204 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
205 falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
206 falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
207
208 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
209 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
210 falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
211 falsealm_cnt->cnt_rate_illegal +
212 falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
213
214 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
215 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
216 falsealm_cnt->cnt_cck_fail = ret_value;
217
218 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
219 falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
220 falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
221 falsealm_cnt->cnt_rate_illegal +
222 falsealm_cnt->cnt_crc8_fail +
223 falsealm_cnt->cnt_mcs_fail +
224 falsealm_cnt->cnt_cck_fail);
225
226 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
227 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
228 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
229 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
230
231 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
232 ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
233 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
234 falsealm_cnt->cnt_parity_fail,
235 falsealm_cnt->cnt_rate_illegal,
236 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
237
238 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
239 ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
240 falsealm_cnt->cnt_ofdm_fail,
241 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
242}
243
244static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
245{
246 struct rtl_priv *rtlpriv = rtl_priv(hw);
247 u8 value_igi = dm_digtable.cur_igvalue;
248
249 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
250 value_igi--;
251 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
252 value_igi += 0;
253 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
254 value_igi++;
255 else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
256 value_igi += 2;
257 if (value_igi > DM_DIG_FA_UPPER)
258 value_igi = DM_DIG_FA_UPPER;
259 else if (value_igi < DM_DIG_FA_LOWER)
260 value_igi = DM_DIG_FA_LOWER;
261 if (rtlpriv->falsealm_cnt.cnt_all > 10000)
262 value_igi = 0x32;
263
264 dm_digtable.cur_igvalue = value_igi;
265 rtl92c_dm_write_dig(hw);
266}
267
268static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
269{
270 struct rtl_priv *rtlpriv = rtl_priv(hw);
271
272 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
273 if ((dm_digtable.backoff_val - 2) <
274 dm_digtable.backoff_val_range_min)
275 dm_digtable.backoff_val =
276 dm_digtable.backoff_val_range_min;
277 else
278 dm_digtable.backoff_val -= 2;
279 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
280 if ((dm_digtable.backoff_val + 2) >
281 dm_digtable.backoff_val_range_max)
282 dm_digtable.backoff_val =
283 dm_digtable.backoff_val_range_max;
284 else
285 dm_digtable.backoff_val += 2;
286 }
287
288 if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
289 dm_digtable.rx_gain_range_max)
290 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
291 else if ((dm_digtable.rssi_val_min + 10 -
292 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
293 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
294 else
295 dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
296 dm_digtable.backoff_val;
297
298 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
299 ("rssi_val_min = %x backoff_val %x\n",
300 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
301
302 rtl92c_dm_write_dig(hw);
303}
304
305static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
306{
307 static u8 binitialized; /* initialized to false */
308 struct rtl_priv *rtlpriv = rtl_priv(hw);
309 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
310 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
311 bool multi_sta = false;
312
313 if (mac->opmode == NL80211_IFTYPE_ADHOC)
314 multi_sta = true;
315
316 if ((multi_sta == false) || (dm_digtable.cursta_connectctate !=
317 DIG_STA_DISCONNECT)) {
318 binitialized = false;
319 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
320 return;
321 } else if (binitialized == false) {
322 binitialized = true;
323 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
324 dm_digtable.cur_igvalue = 0x20;
325 rtl92c_dm_write_dig(hw);
326 }
327
328 if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
329 if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
330 (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
331
332 if (dm_digtable.dig_ext_port_stage ==
333 DIG_EXT_PORT_STAGE_2) {
334 dm_digtable.cur_igvalue = 0x20;
335 rtl92c_dm_write_dig(hw);
336 }
337
338 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
339 } else if (rssi_strength > dm_digtable.rssi_highthresh) {
340 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
341 rtl92c_dm_ctrl_initgain_by_fa(hw);
342 }
343 } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
344 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
345 dm_digtable.cur_igvalue = 0x20;
346 rtl92c_dm_write_dig(hw);
347 }
348
349 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
350 ("curmultista_connectstate = "
351 "%x dig_ext_port_stage %x\n",
352 dm_digtable.curmultista_connectstate,
353 dm_digtable.dig_ext_port_stage));
354}
355
356static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
357{
358 struct rtl_priv *rtlpriv = rtl_priv(hw);
359
360 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
361 ("presta_connectstate = %x,"
362 " cursta_connectctate = %x\n",
363 dm_digtable.presta_connectstate,
364 dm_digtable.cursta_connectctate));
365
366 if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
367 || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
368 || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
369
370 if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
371 dm_digtable.rssi_val_min =
372 rtl92c_dm_initial_gain_min_pwdb(hw);
373 rtl92c_dm_ctrl_initgain_by_rssi(hw);
374 }
375 } else {
376 dm_digtable.rssi_val_min = 0;
377 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
378 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
379 dm_digtable.cur_igvalue = 0x20;
380 dm_digtable.pre_igvalue = 0;
381 rtl92c_dm_write_dig(hw);
382 }
383}
384
385static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
386{
387 struct rtl_priv *rtlpriv = rtl_priv(hw);
388 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
389
390 if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
391 dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
392
393 if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
394 if (dm_digtable.rssi_val_min <= 25)
395 dm_digtable.cur_cck_pd_state =
396 CCK_PD_STAGE_LowRssi;
397 else
398 dm_digtable.cur_cck_pd_state =
399 CCK_PD_STAGE_HighRssi;
400 } else {
401 if (dm_digtable.rssi_val_min <= 20)
402 dm_digtable.cur_cck_pd_state =
403 CCK_PD_STAGE_LowRssi;
404 else
405 dm_digtable.cur_cck_pd_state =
406 CCK_PD_STAGE_HighRssi;
407 }
408 } else {
409 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
410 }
411
412 if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
413 if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
414 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
415 dm_digtable.cur_cck_fa_state =
416 CCK_FA_STAGE_High;
417 else
418 dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
419
420 if (dm_digtable.pre_cck_fa_state !=
421 dm_digtable.cur_cck_fa_state) {
422 if (dm_digtable.cur_cck_fa_state ==
423 CCK_FA_STAGE_Low)
424 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
425 0x83);
426 else
427 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
428 0xcd);
429
430 dm_digtable.pre_cck_fa_state =
431 dm_digtable.cur_cck_fa_state;
432 }
433
434 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
435
436 if (IS_92C_SERIAL(rtlhal->version))
437 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
438 MASKBYTE2, 0xd7);
439 } else {
440 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
441 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
442
443 if (IS_92C_SERIAL(rtlhal->version))
444 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
445 MASKBYTE2, 0xd3);
446 }
447 dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
448 }
449
450 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
451 ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
452
453 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
454 ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
455}
456
457static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
458{
459 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
460
461 if (mac->act_scanning == true)
462 return;
463
464 if ((mac->link_state > MAC80211_NOLINK) &&
465 (mac->link_state < MAC80211_LINKED))
466 dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
467 else if (mac->link_state >= MAC80211_LINKED)
468 dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
469 else
470 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
471
472 rtl92c_dm_initial_gain_sta(hw);
473 rtl92c_dm_initial_gain_multi_sta(hw);
474 rtl92c_dm_cck_packet_detection_thresh(hw);
475
476 dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
477
478}
479
480static void rtl92c_dm_dig(struct ieee80211_hw *hw)
481{
482 struct rtl_priv *rtlpriv = rtl_priv(hw);
483
484 if (rtlpriv->dm.dm_initialgain_enable == false)
485 return;
486 if (dm_digtable.dig_enable_flag == false)
487 return;
488
489 rtl92c_dm_ctrl_initgain_by_twoport(hw);
490
491}
492
493static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
494{
495 struct rtl_priv *rtlpriv = rtl_priv(hw);
496
497 rtlpriv->dm.dynamic_txpower_enable = false;
498
499 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
500 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
501}
502
503void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
504{
505 struct rtl_priv *rtlpriv = rtl_priv(hw);
506
507 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
508 ("cur_igvalue = 0x%x, "
509 "pre_igvalue = 0x%x, backoff_val = %d\n",
510 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
511 dm_digtable.backoff_val));
512
513 if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
514 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
515 dm_digtable.cur_igvalue);
516 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
517 dm_digtable.cur_igvalue);
518
519 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
520 }
521}
522EXPORT_SYMBOL(rtl92c_dm_write_dig);
523
524static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
525{
526 struct rtl_priv *rtlpriv = rtl_priv(hw);
527 long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
528
529 u8 h2c_parameter[3] = { 0 };
530
531 return;
532
533 if (tmpentry_max_pwdb != 0) {
534 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
535 tmpentry_max_pwdb;
536 } else {
537 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
538 }
539
540 if (tmpentry_min_pwdb != 0xff) {
541 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
542 tmpentry_min_pwdb;
543 } else {
544 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
545 }
546
547 h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
548 h2c_parameter[0] = 0;
549
550 rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
551}
552
553void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
554{
555 struct rtl_priv *rtlpriv = rtl_priv(hw);
556 rtlpriv->dm.current_turbo_edca = false;
557 rtlpriv->dm.is_any_nonbepkts = false;
558 rtlpriv->dm.is_cur_rdlstate = false;
559}
560EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
561
562static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
563{
564 struct rtl_priv *rtlpriv = rtl_priv(hw);
565 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
566 static u64 last_txok_cnt;
567 static u64 last_rxok_cnt;
568 u64 cur_txok_cnt;
569 u64 cur_rxok_cnt;
570 u32 edca_be_ul = 0x5ea42b;
571 u32 edca_be_dl = 0x5ea42b;
572
573 if (mac->opmode == NL80211_IFTYPE_ADHOC)
574 goto dm_checkedcaturbo_exit;
575
576 if (mac->link_state != MAC80211_LINKED) {
577 rtlpriv->dm.current_turbo_edca = false;
578 return;
579 }
580
581 if (!mac->ht_enable) { /*FIX MERGE */
582 if (!(edca_be_ul & 0xffff0000))
583 edca_be_ul |= 0x005e0000;
584
585 if (!(edca_be_dl & 0xffff0000))
586 edca_be_dl |= 0x005e0000;
587 }
588
589 if ((!rtlpriv->dm.is_any_nonbepkts) &&
590 (!rtlpriv->dm.disable_framebursting)) {
591 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
592 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
593 if (cur_rxok_cnt > 4 * cur_txok_cnt) {
594 if (!rtlpriv->dm.is_cur_rdlstate ||
595 !rtlpriv->dm.current_turbo_edca) {
596 rtl_write_dword(rtlpriv,
597 REG_EDCA_BE_PARAM,
598 edca_be_dl);
599 rtlpriv->dm.is_cur_rdlstate = true;
600 }
601 } else {
602 if (rtlpriv->dm.is_cur_rdlstate ||
603 !rtlpriv->dm.current_turbo_edca) {
604 rtl_write_dword(rtlpriv,
605 REG_EDCA_BE_PARAM,
606 edca_be_ul);
607 rtlpriv->dm.is_cur_rdlstate = false;
608 }
609 }
610 rtlpriv->dm.current_turbo_edca = true;
611 } else {
612 if (rtlpriv->dm.current_turbo_edca) {
613 u8 tmp = AC0_BE;
614 rtlpriv->cfg->ops->set_hw_reg(hw,
615 HW_VAR_AC_PARAM,
616 (u8 *) (&tmp));
617 rtlpriv->dm.current_turbo_edca = false;
618 }
619 }
620
621dm_checkedcaturbo_exit:
622 rtlpriv->dm.is_any_nonbepkts = false;
623 last_txok_cnt = rtlpriv->stats.txbytesunicast;
624 last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
625}
626
627static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
628 *hw)
629{
630 struct rtl_priv *rtlpriv = rtl_priv(hw);
631 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
632 struct rtl_phy *rtlphy = &(rtlpriv->phy);
633 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
634 u8 thermalvalue, delta, delta_lck, delta_iqk;
635 long ele_a, ele_d, temp_cck, val_x, value32;
636 long val_y, ele_c;
637 u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
638 int i;
639 bool is2t = IS_92C_SERIAL(rtlhal->version);
640 u8 txpwr_level[2] = {0, 0};
641 u8 ofdm_min_index = 6, rf;
642
643 rtlpriv->dm.txpower_trackingInit = true;
644 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
645 ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
646
647 thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
648
649 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
650 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
651 "eeprom_thermalmeter 0x%x\n",
652 thermalvalue, rtlpriv->dm.thermalvalue,
653 rtlefuse->eeprom_thermalmeter));
654
655 rtl92c_phy_ap_calibrate(hw, (thermalvalue -
656 rtlefuse->eeprom_thermalmeter));
657 if (is2t)
658 rf = 2;
659 else
660 rf = 1;
661
662 if (thermalvalue) {
663 ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
664 MASKDWORD) & MASKOFDM_D;
665
666 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
667 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
668 ofdm_index_old[0] = (u8) i;
669
670 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
671 ("Initial pathA ele_d reg0x%x = 0x%lx, "
672 "ofdm_index=0x%x\n",
673 ROFDM0_XATXIQIMBALANCE,
674 ele_d, ofdm_index_old[0]));
675 break;
676 }
677 }
678
679 if (is2t) {
680 ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
681 MASKDWORD) & MASKOFDM_D;
682
683 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
684 if (ele_d == (ofdmswing_table[i] &
685 MASKOFDM_D)) {
686 ofdm_index_old[1] = (u8) i;
687
688 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
689 DBG_LOUD,
690 ("Initial pathB ele_d reg0x%x = "
691 "0x%lx, ofdm_index=0x%x\n",
692 ROFDM0_XBTXIQIMBALANCE, ele_d,
693 ofdm_index_old[1]));
694 break;
695 }
696 }
697 }
698
699 temp_cck =
700 rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
701
702 for (i = 0; i < CCK_TABLE_LENGTH; i++) {
703 if (rtlpriv->dm.cck_inch14) {
704 if (memcmp((void *)&temp_cck,
705 (void *)&cckswing_table_ch14[i][2],
706 4) == 0) {
707 cck_index_old = (u8) i;
708
709 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
710 DBG_LOUD,
711 ("Initial reg0x%x = 0x%lx, "
712 "cck_index=0x%x, ch 14 %d\n",
713 RCCK0_TXFILTER2, temp_cck,
714 cck_index_old,
715 rtlpriv->dm.cck_inch14));
716 break;
717 }
718 } else {
719 if (memcmp((void *)&temp_cck,
720 (void *)
721 &cckswing_table_ch1ch13[i][2],
722 4) == 0) {
723 cck_index_old = (u8) i;
724
725 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
726 DBG_LOUD,
727 ("Initial reg0x%x = 0x%lx, "
728 "cck_index=0x%x, ch14 %d\n",
729 RCCK0_TXFILTER2, temp_cck,
730 cck_index_old,
731 rtlpriv->dm.cck_inch14));
732 break;
733 }
734 }
735 }
736
737 if (!rtlpriv->dm.thermalvalue) {
738 rtlpriv->dm.thermalvalue =
739 rtlefuse->eeprom_thermalmeter;
740 rtlpriv->dm.thermalvalue_lck = thermalvalue;
741 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
742 for (i = 0; i < rf; i++)
743 rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
744 rtlpriv->dm.cck_index = cck_index_old;
745 }
746
747 delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
748 (thermalvalue - rtlpriv->dm.thermalvalue) :
749 (rtlpriv->dm.thermalvalue - thermalvalue);
750
751 delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
752 (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
753 (rtlpriv->dm.thermalvalue_lck - thermalvalue);
754
755 delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
756 (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
757 (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
758
759 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
760 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
761 "eeprom_thermalmeter 0x%x delta 0x%x "
762 "delta_lck 0x%x delta_iqk 0x%x\n",
763 thermalvalue, rtlpriv->dm.thermalvalue,
764 rtlefuse->eeprom_thermalmeter, delta, delta_lck,
765 delta_iqk));
766
767 if (delta_lck > 1) {
768 rtlpriv->dm.thermalvalue_lck = thermalvalue;
769 rtl92c_phy_lc_calibrate(hw);
770 }
771
772 if (delta > 0 && rtlpriv->dm.txpower_track_control) {
773 if (thermalvalue > rtlpriv->dm.thermalvalue) {
774 for (i = 0; i < rf; i++)
775 rtlpriv->dm.ofdm_index[i] -= delta;
776 rtlpriv->dm.cck_index -= delta;
777 } else {
778 for (i = 0; i < rf; i++)
779 rtlpriv->dm.ofdm_index[i] += delta;
780 rtlpriv->dm.cck_index += delta;
781 }
782
783 if (is2t) {
784 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
785 ("temp OFDM_A_index=0x%x, "
786 "OFDM_B_index=0x%x,"
787 "cck_index=0x%x\n",
788 rtlpriv->dm.ofdm_index[0],
789 rtlpriv->dm.ofdm_index[1],
790 rtlpriv->dm.cck_index));
791 } else {
792 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
793 ("temp OFDM_A_index=0x%x,"
794 "cck_index=0x%x\n",
795 rtlpriv->dm.ofdm_index[0],
796 rtlpriv->dm.cck_index));
797 }
798
799 if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
800 for (i = 0; i < rf; i++)
801 ofdm_index[i] =
802 rtlpriv->dm.ofdm_index[i]
803 + 1;
804 cck_index = rtlpriv->dm.cck_index + 1;
805 } else {
806 for (i = 0; i < rf; i++)
807 ofdm_index[i] =
808 rtlpriv->dm.ofdm_index[i];
809 cck_index = rtlpriv->dm.cck_index;
810 }
811
812 for (i = 0; i < rf; i++) {
813 if (txpwr_level[i] >= 0 &&
814 txpwr_level[i] <= 26) {
815 if (thermalvalue >
816 rtlefuse->eeprom_thermalmeter) {
817 if (delta < 5)
818 ofdm_index[i] -= 1;
819
820 else
821 ofdm_index[i] -= 2;
822 } else if (delta > 5 && thermalvalue <
823 rtlefuse->
824 eeprom_thermalmeter) {
825 ofdm_index[i] += 1;
826 }
827 } else if (txpwr_level[i] >= 27 &&
828 txpwr_level[i] <= 32
829 && thermalvalue >
830 rtlefuse->eeprom_thermalmeter) {
831 if (delta < 5)
832 ofdm_index[i] -= 1;
833
834 else
835 ofdm_index[i] -= 2;
836 } else if (txpwr_level[i] >= 32 &&
837 txpwr_level[i] <= 38 &&
838 thermalvalue >
839 rtlefuse->eeprom_thermalmeter
840 && delta > 5) {
841 ofdm_index[i] -= 1;
842 }
843 }
844
845 if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
846 if (thermalvalue >
847 rtlefuse->eeprom_thermalmeter) {
848 if (delta < 5)
849 cck_index -= 1;
850
851 else
852 cck_index -= 2;
853 } else if (delta > 5 && thermalvalue <
854 rtlefuse->eeprom_thermalmeter) {
855 cck_index += 1;
856 }
857 } else if (txpwr_level[i] >= 27 &&
858 txpwr_level[i] <= 32 &&
859 thermalvalue >
860 rtlefuse->eeprom_thermalmeter) {
861 if (delta < 5)
862 cck_index -= 1;
863
864 else
865 cck_index -= 2;
866 } else if (txpwr_level[i] >= 32 &&
867 txpwr_level[i] <= 38 &&
868 thermalvalue > rtlefuse->eeprom_thermalmeter
869 && delta > 5) {
870 cck_index -= 1;
871 }
872
873 for (i = 0; i < rf; i++) {
874 if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
875 ofdm_index[i] = OFDM_TABLE_SIZE - 1;
876
877 else if (ofdm_index[i] < ofdm_min_index)
878 ofdm_index[i] = ofdm_min_index;
879 }
880
881 if (cck_index > CCK_TABLE_SIZE - 1)
882 cck_index = CCK_TABLE_SIZE - 1;
883 else if (cck_index < 0)
884 cck_index = 0;
885
886 if (is2t) {
887 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
888 ("new OFDM_A_index=0x%x, "
889 "OFDM_B_index=0x%x,"
890 "cck_index=0x%x\n",
891 ofdm_index[0], ofdm_index[1],
892 cck_index));
893 } else {
894 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
895 ("new OFDM_A_index=0x%x,"
896 "cck_index=0x%x\n",
897 ofdm_index[0], cck_index));
898 }
899 }
900
901 if (rtlpriv->dm.txpower_track_control && delta != 0) {
902 ele_d =
903 (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
904 val_x = rtlphy->reg_e94;
905 val_y = rtlphy->reg_e9c;
906
907 if (val_x != 0) {
908 if ((val_x & 0x00000200) != 0)
909 val_x = val_x | 0xFFFFFC00;
910 ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
911
912 if ((val_y & 0x00000200) != 0)
913 val_y = val_y | 0xFFFFFC00;
914 ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
915
916 value32 = (ele_d << 22) |
917 ((ele_c & 0x3F) << 16) | ele_a;
918
919 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
920 MASKDWORD, value32);
921
922 value32 = (ele_c & 0x000003C0) >> 6;
923 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
924 value32);
925
926 value32 = ((val_x * ele_d) >> 7) & 0x01;
927 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
928 BIT(31), value32);
929
930 value32 = ((val_y * ele_d) >> 7) & 0x01;
931 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
932 BIT(29), value32);
933 } else {
934 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
935 MASKDWORD,
936 ofdmswing_table[ofdm_index[0]]);
937
938 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
939 0x00);
940 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
941 BIT(31) | BIT(29), 0x00);
942 }
943
944 if (!rtlpriv->dm.cck_inch14) {
945 rtl_write_byte(rtlpriv, 0xa22,
946 cckswing_table_ch1ch13[cck_index]
947 [0]);
948 rtl_write_byte(rtlpriv, 0xa23,
949 cckswing_table_ch1ch13[cck_index]
950 [1]);
951 rtl_write_byte(rtlpriv, 0xa24,
952 cckswing_table_ch1ch13[cck_index]
953 [2]);
954 rtl_write_byte(rtlpriv, 0xa25,
955 cckswing_table_ch1ch13[cck_index]
956 [3]);
957 rtl_write_byte(rtlpriv, 0xa26,
958 cckswing_table_ch1ch13[cck_index]
959 [4]);
960 rtl_write_byte(rtlpriv, 0xa27,
961 cckswing_table_ch1ch13[cck_index]
962 [5]);
963 rtl_write_byte(rtlpriv, 0xa28,
964 cckswing_table_ch1ch13[cck_index]
965 [6]);
966 rtl_write_byte(rtlpriv, 0xa29,
967 cckswing_table_ch1ch13[cck_index]
968 [7]);
969 } else {
970 rtl_write_byte(rtlpriv, 0xa22,
971 cckswing_table_ch14[cck_index]
972 [0]);
973 rtl_write_byte(rtlpriv, 0xa23,
974 cckswing_table_ch14[cck_index]
975 [1]);
976 rtl_write_byte(rtlpriv, 0xa24,
977 cckswing_table_ch14[cck_index]
978 [2]);
979 rtl_write_byte(rtlpriv, 0xa25,
980 cckswing_table_ch14[cck_index]
981 [3]);
982 rtl_write_byte(rtlpriv, 0xa26,
983 cckswing_table_ch14[cck_index]
984 [4]);
985 rtl_write_byte(rtlpriv, 0xa27,
986 cckswing_table_ch14[cck_index]
987 [5]);
988 rtl_write_byte(rtlpriv, 0xa28,
989 cckswing_table_ch14[cck_index]
990 [6]);
991 rtl_write_byte(rtlpriv, 0xa29,
992 cckswing_table_ch14[cck_index]
993 [7]);
994 }
995
996 if (is2t) {
997 ele_d = (ofdmswing_table[ofdm_index[1]] &
998 0xFFC00000) >> 22;
999
1000 val_x = rtlphy->reg_eb4;
1001 val_y = rtlphy->reg_ebc;
1002
1003 if (val_x != 0) {
1004 if ((val_x & 0x00000200) != 0)
1005 val_x = val_x | 0xFFFFFC00;
1006 ele_a = ((val_x * ele_d) >> 8) &
1007 0x000003FF;
1008
1009 if ((val_y & 0x00000200) != 0)
1010 val_y = val_y | 0xFFFFFC00;
1011 ele_c = ((val_y * ele_d) >> 8) &
1012 0x00003FF;
1013
1014 value32 = (ele_d << 22) |
1015 ((ele_c & 0x3F) << 16) | ele_a;
1016 rtl_set_bbreg(hw,
1017 ROFDM0_XBTXIQIMBALANCE,
1018 MASKDWORD, value32);
1019
1020 value32 = (ele_c & 0x000003C0) >> 6;
1021 rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
1022 MASKH4BITS, value32);
1023
1024 value32 = ((val_x * ele_d) >> 7) & 0x01;
1025 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1026 BIT(27), value32);
1027
1028 value32 = ((val_y * ele_d) >> 7) & 0x01;
1029 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1030 BIT(25), value32);
1031 } else {
1032 rtl_set_bbreg(hw,
1033 ROFDM0_XBTXIQIMBALANCE,
1034 MASKDWORD,
1035 ofdmswing_table[ofdm_index
1036 [1]]);
1037 rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
1038 MASKH4BITS, 0x00);
1039 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1040 BIT(27) | BIT(25), 0x00);
1041 }
1042
1043 }
1044 }
1045
1046 if (delta_iqk > 3) {
1047 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
1048 rtl92c_phy_iq_calibrate(hw, false);
1049 }
1050
1051 if (rtlpriv->dm.txpower_track_control)
1052 rtlpriv->dm.thermalvalue = thermalvalue;
1053 }
1054
1055 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
1056
1057}
1058
1059static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
1060 struct ieee80211_hw *hw)
1061{
1062 struct rtl_priv *rtlpriv = rtl_priv(hw);
1063
1064 rtlpriv->dm.txpower_tracking = true;
1065 rtlpriv->dm.txpower_trackingInit = false;
1066
1067 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1068 ("pMgntInfo->txpower_tracking = %d\n",
1069 rtlpriv->dm.txpower_tracking));
1070}
1071
/*
 * Tx-power-tracking init entry point.  Only the thermal-meter flavour
 * exists for this chip family, so this is a thin dispatch wrapper.
 */
static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
	rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
}
1076
/*
 * Run one tx-power-tracking pass immediately; delegates straight to
 * the thermal-meter callback.
 */
static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
{
	rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
}
1081
1082static void rtl92c_dm_check_txpower_tracking_thermal_meter(
1083 struct ieee80211_hw *hw)
1084{
1085 struct rtl_priv *rtlpriv = rtl_priv(hw);
1086 static u8 tm_trigger;
1087
1088 if (!rtlpriv->dm.txpower_tracking)
1089 return;
1090
1091 if (!tm_trigger) {
1092 rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
1093 0x60);
1094 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1095 ("Trigger 92S Thermal Meter!!\n"));
1096 tm_trigger = 1;
1097 return;
1098 } else {
1099 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1100 ("Schedule TxPowerTracking direct call!!\n"));
1101 rtl92c_dm_txpower_tracking_directcall(hw);
1102 tm_trigger = 0;
1103 }
1104}
1105
/*
 * Public watchdog hook: drive the trigger/read thermal-meter tracking
 * state machine.
 */
void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
{
	rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
}
EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);
1111
1112void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1113{
1114 struct rtl_priv *rtlpriv = rtl_priv(hw);
1115 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1116
1117 p_ra->ratr_state = DM_RATR_STA_INIT;
1118 p_ra->pre_ratr_state = DM_RATR_STA_INIT;
1119
1120 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
1121 rtlpriv->dm.useramask = true;
1122 else
1123 rtlpriv->dm.useramask = false;
1124
1125}
1126EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
1127
1128static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1129{
1130 struct rtl_priv *rtlpriv = rtl_priv(hw);
1131 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1132 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1133 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1134 u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
1135
1136 if (is_hal_stop(rtlhal)) {
1137 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1138 ("<---- driver is going to unload\n"));
1139 return;
1140 }
1141
1142 if (!rtlpriv->dm.useramask) {
1143 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1144 ("<---- driver does not control rate adaptive mask\n"));
1145 return;
1146 }
1147
1148 if (mac->link_state == MAC80211_LINKED) {
1149
1150 switch (p_ra->pre_ratr_state) {
1151 case DM_RATR_STA_HIGH:
1152 high_rssithresh_for_ra = 50;
1153 low_rssithresh_for_ra = 20;
1154 break;
1155 case DM_RATR_STA_MIDDLE:
1156 high_rssithresh_for_ra = 55;
1157 low_rssithresh_for_ra = 20;
1158 break;
1159 case DM_RATR_STA_LOW:
1160 high_rssithresh_for_ra = 50;
1161 low_rssithresh_for_ra = 25;
1162 break;
1163 default:
1164 high_rssithresh_for_ra = 50;
1165 low_rssithresh_for_ra = 20;
1166 break;
1167 }
1168
1169 if (rtlpriv->dm.undecorated_smoothed_pwdb >
1170 (long)high_rssithresh_for_ra)
1171 p_ra->ratr_state = DM_RATR_STA_HIGH;
1172 else if (rtlpriv->dm.undecorated_smoothed_pwdb >
1173 (long)low_rssithresh_for_ra)
1174 p_ra->ratr_state = DM_RATR_STA_MIDDLE;
1175 else
1176 p_ra->ratr_state = DM_RATR_STA_LOW;
1177
1178 if (p_ra->pre_ratr_state != p_ra->ratr_state) {
1179 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1180 ("RSSI = %ld\n",
1181 rtlpriv->dm.undecorated_smoothed_pwdb));
1182 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1183 ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
1184 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1185 ("PreState = %d, CurState = %d\n",
1186 p_ra->pre_ratr_state, p_ra->ratr_state));
1187
1188 rtlpriv->cfg->ops->update_rate_mask(hw,
1189 p_ra->ratr_state);
1190
1191 p_ra->pre_ratr_state = p_ra->ratr_state;
1192 }
1193 }
1194}
1195
/*
 * Reset the dynamic BB power-saving state: no CCA or RF decision has
 * been taken yet (both "pre" and "cur" parked at their MAX sentinels)
 * and no minimum-RSSI sample is cached.
 * NOTE(review): dm_pstable is a file-scope global, so this state is
 * shared by all adapters driven by this module.
 */
static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	dm_pstable.pre_ccastate = CCA_MAX;
	dm_pstable.cur_ccasate = CCA_MAX;
	dm_pstable.pre_rfstate = RF_MAX;
	dm_pstable.cur_rfstate = RF_MAX;
	dm_pstable.rssi_val_min = 0;
}
1204
1205static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
1206{
1207 struct rtl_priv *rtlpriv = rtl_priv(hw);
1208 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1209
1210 if (dm_pstable.rssi_val_min != 0) {
1211 if (dm_pstable.pre_ccastate == CCA_2R) {
1212 if (dm_pstable.rssi_val_min >= 35)
1213 dm_pstable.cur_ccasate = CCA_1R;
1214 else
1215 dm_pstable.cur_ccasate = CCA_2R;
1216 } else {
1217 if (dm_pstable.rssi_val_min <= 30)
1218 dm_pstable.cur_ccasate = CCA_2R;
1219 else
1220 dm_pstable.cur_ccasate = CCA_1R;
1221 }
1222 } else {
1223 dm_pstable.cur_ccasate = CCA_MAX;
1224 }
1225
1226 if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
1227 if (dm_pstable.cur_ccasate == CCA_1R) {
1228 if (get_rf_type(rtlphy) == RF_2T2R) {
1229 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1230 MASKBYTE0, 0x13);
1231 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
1232 } else {
1233 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1234 MASKBYTE0, 0x23);
1235 rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
1236 }
1237 } else {
1238 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
1239 0x33);
1240 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
1241 }
1242 dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
1243 }
1244
1245 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
1246 (dm_pstable.cur_ccasate ==
1247 0) ? "1RCCA" : "2RCCA"));
1248}
1249
1250void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1251{
1252 static u8 initialize;
1253 static u32 reg_874, reg_c70, reg_85c, reg_a74;
1254
1255 if (initialize == 0) {
1256 reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1257 MASKDWORD) & 0x1CC000) >> 14;
1258
1259 reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
1260 MASKDWORD) & BIT(3)) >> 3;
1261
1262 reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
1263 MASKDWORD) & 0xFF000000) >> 24;
1264
1265 reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
1266
1267 initialize = 1;
1268 }
1269
1270 if (!bforce_in_normal) {
1271 if (dm_pstable.rssi_val_min != 0) {
1272 if (dm_pstable.pre_rfstate == RF_NORMAL) {
1273 if (dm_pstable.rssi_val_min >= 30)
1274 dm_pstable.cur_rfstate = RF_SAVE;
1275 else
1276 dm_pstable.cur_rfstate = RF_NORMAL;
1277 } else {
1278 if (dm_pstable.rssi_val_min <= 25)
1279 dm_pstable.cur_rfstate = RF_NORMAL;
1280 else
1281 dm_pstable.cur_rfstate = RF_SAVE;
1282 }
1283 } else {
1284 dm_pstable.cur_rfstate = RF_MAX;
1285 }
1286 } else {
1287 dm_pstable.cur_rfstate = RF_NORMAL;
1288 }
1289
1290 if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
1291 if (dm_pstable.cur_rfstate == RF_SAVE) {
1292 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1293 0x1C0000, 0x2);
1294 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
1295 rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
1296 0xFF000000, 0x63);
1297 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1298 0xC000, 0x2);
1299 rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
1300 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1301 rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
1302 } else {
1303 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1304 0x1CC000, reg_874);
1305 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
1306 reg_c70);
1307 rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
1308 reg_85c);
1309 rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
1310 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1311 }
1312
1313 dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
1314 }
1315}
1316EXPORT_SYMBOL(rtl92c_dm_rf_saving);
1317
/*
 * Refresh dm_pstable.rssi_val_min from the PWDB source matching the
 * current link mode, then (92C dual-chain parts only) run the 1R-CCA
 * power-saving decision.
 *
 * NOTE(review): the argument after rtlpriv in these RT_TRACE calls
 * should be a COMP_* debug component, but DBG_LOUD (a level) is
 * passed instead — confirm against the RT_TRACE macro and fix
 * tree-wide in a separate patch.
 */
static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	/* No link and no associated entries: clear the cached minimum. */
	if (((mac->link_state == MAC80211_NOLINK)) &&
	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
		dm_pstable.rssi_val_min = 0;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 ("Not connected to any\n"));
	}

	if (mac->link_state == MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			/* IBSS: the weakest peer entry bounds the decision. */
			dm_pstable.rssi_val_min =
			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 ("AP Client PWDB = 0x%lx\n",
				  dm_pstable.rssi_val_min));
		} else {
			/* STA: use the default-port smoothed PWDB. */
			dm_pstable.rssi_val_min =
			    rtlpriv->dm.undecorated_smoothed_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 ("STA Default Port PWDB = 0x%lx\n",
				  dm_pstable.rssi_val_min));
		}
	} else {
		/* Not linked — note this also re-runs for the NOLINK
		 * case handled above, overwriting rssi_val_min with the
		 * same (zero) entry-min value. */
		dm_pstable.rssi_val_min =
		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;

		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 ("AP Ext Port PWDB = 0x%lx\n",
			  dm_pstable.rssi_val_min));
	}

	if (IS_92C_SERIAL(rtlhal->version))
		rtl92c_dm_1r_cca(hw);
}
1357
/*
 * One-time setup of all dynamic mechanisms (DIG, dynamic tx power,
 * EDCA turbo, rate-adaptive mask, tx-power tracking, BB power saving).
 */
void rtl92c_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Dynamic mechanisms are driven by the driver, not firmware. */
	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtl92c_dm_diginit(hw);
	rtl92c_dm_init_dynamic_txpower(hw);
	rtl92c_dm_init_edca_turbo(hw);
	rtl92c_dm_init_rate_adaptive_mask(hw);
	rtl92c_dm_initialize_txpower_tracking(hw);
	rtl92c_dm_init_dynamic_bb_powersaving(hw);
}
EXPORT_SYMBOL(rtl92c_dm_init);
1371
1372void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1373{
1374 struct rtl_priv *rtlpriv = rtl_priv(hw);
1375 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1376 bool fw_current_inpsmode = false;
1377 bool fw_ps_awake = true;
1378
1379 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
1380 (u8 *) (&fw_current_inpsmode));
1381 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
1382 (u8 *) (&fw_ps_awake));
1383
1384 if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
1385 fw_ps_awake)
1386 && (!ppsc->rfchange_inprogress)) {
1387 rtl92c_dm_pwdb_monitor(hw);
1388 rtl92c_dm_dig(hw);
1389 rtl92c_dm_false_alarm_counter_statistics(hw);
1390 rtl92c_dm_dynamic_bb_powersaving(hw);
1391 rtlpriv->cfg->ops->dm_dynamic_txpower(hw);
1392 rtl92c_dm_check_txpower_tracking(hw);
1393 rtl92c_dm_refresh_rate_adaptive_mask(hw);
1394 rtl92c_dm_check_edca_turbo(hw);
1395
1396 }
1397}
1398EXPORT_SYMBOL(rtl92c_dm_watchdog);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
new file mode 100644
index 000000000000..b9cbb0a3c03f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -0,0 +1,204 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92COMMON_DM_H__
31#define __RTL92COMMON_DM_H__
32
33#include "../wifi.h"
34#include "../rtl8192ce/def.h"
35#include "../rtl8192ce/reg.h"
36#include "fw_common.h"
37
38#define HAL_DM_DIG_DISABLE BIT(0)
39#define HAL_DM_HIPWR_DISABLE BIT(1)
40
41#define OFDM_TABLE_LENGTH 37
42#define CCK_TABLE_LENGTH 33
43
44#define OFDM_TABLE_SIZE 37
45#define CCK_TABLE_SIZE 33
46
47#define BW_AUTO_SWITCH_HIGH_LOW 25
48#define BW_AUTO_SWITCH_LOW_HIGH 30
49
50#define DM_DIG_THRESH_HIGH 40
51#define DM_DIG_THRESH_LOW 35
52
53#define DM_FALSEALARM_THRESH_LOW 400
54#define DM_FALSEALARM_THRESH_HIGH 1000
55
56#define DM_DIG_MAX 0x3e
57#define DM_DIG_MIN 0x1e
58
59#define DM_DIG_FA_UPPER 0x32
60#define DM_DIG_FA_LOWER 0x20
61#define DM_DIG_FA_TH0 0x20
62#define DM_DIG_FA_TH1 0x100
63#define DM_DIG_FA_TH2 0x200
64
65#define DM_DIG_BACKOFF_MAX 12
66#define DM_DIG_BACKOFF_MIN -4
67#define DM_DIG_BACKOFF_DEFAULT 10
68
69#define RXPATHSELECTION_SS_TH_lOW 30
70#define RXPATHSELECTION_DIFF_TH 18
71
72#define DM_RATR_STA_INIT 0
73#define DM_RATR_STA_HIGH 1
74#define DM_RATR_STA_MIDDLE 2
75#define DM_RATR_STA_LOW 3
76
77#define CTS2SELF_THVAL 30
78#define REGC38_TH 20
79
80#define WAIOTTHVal 25
81
82#define TXHIGHPWRLEVEL_NORMAL 0
83#define TXHIGHPWRLEVEL_LEVEL1 1
84#define TXHIGHPWRLEVEL_LEVEL2 2
85#define TXHIGHPWRLEVEL_BT1 3
86#define TXHIGHPWRLEVEL_BT2 4
87
88#define DM_TYPE_BYFW 0
89#define DM_TYPE_BYDRIVER 1
90
91#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
92#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
93
94struct ps_t {
95 u8 pre_ccastate;
96 u8 cur_ccasate;
97 u8 pre_rfstate;
98 u8 cur_rfstate;
99 long rssi_val_min;
100};
101
102struct dig_t {
103 u8 dig_enable_flag;
104 u8 dig_ext_port_stage;
105 u32 rssi_lowthresh;
106 u32 rssi_highthresh;
107 u32 fa_lowthresh;
108 u32 fa_highthresh;
109 u8 cursta_connectctate;
110 u8 presta_connectstate;
111 u8 curmultista_connectstate;
112 u8 pre_igvalue;
113 u8 cur_igvalue;
114 char backoff_val;
115 char backoff_val_range_max;
116 char backoff_val_range_min;
117 u8 rx_gain_range_max;
118 u8 rx_gain_range_min;
119 u8 rssi_val_min;
120 u8 pre_cck_pd_state;
121 u8 cur_cck_pd_state;
122 u8 pre_cck_fa_state;
123 u8 cur_cck_fa_state;
124 u8 pre_ccastate;
125 u8 cur_ccasate;
126};
127
128struct swat_t {
129 u8 failure_cnt;
130 u8 try_flag;
131 u8 stop_trying;
132 long pre_rssi;
133 long trying_threshold;
134 u8 cur_antenna;
135 u8 pre_antenna;
136};
137
138enum tag_dynamic_init_gain_operation_type_definition {
139 DIG_TYPE_THRESH_HIGH = 0,
140 DIG_TYPE_THRESH_LOW = 1,
141 DIG_TYPE_BACKOFF = 2,
142 DIG_TYPE_RX_GAIN_MIN = 3,
143 DIG_TYPE_RX_GAIN_MAX = 4,
144 DIG_TYPE_ENABLE = 5,
145 DIG_TYPE_DISABLE = 6,
146 DIG_OP_TYPE_MAX
147};
148
149enum tag_cck_packet_detection_threshold_type_definition {
150 CCK_PD_STAGE_LowRssi = 0,
151 CCK_PD_STAGE_HighRssi = 1,
152 CCK_FA_STAGE_Low = 2,
153 CCK_FA_STAGE_High = 3,
154 CCK_PD_STAGE_MAX = 4,
155};
156
157enum dm_1r_cca_e {
158 CCA_1R = 0,
159 CCA_2R = 1,
160 CCA_MAX = 2,
161};
162
163enum dm_rf_e {
164 RF_SAVE = 0,
165 RF_NORMAL = 1,
166 RF_MAX = 2,
167};
168
169enum dm_sw_ant_switch_e {
170 ANS_ANTENNA_B = 1,
171 ANS_ANTENNA_A = 2,
172 ANS_ANTENNA_MAX = 3,
173};
174
175enum dm_dig_ext_port_alg_e {
176 DIG_EXT_PORT_STAGE_0 = 0,
177 DIG_EXT_PORT_STAGE_1 = 1,
178 DIG_EXT_PORT_STAGE_2 = 2,
179 DIG_EXT_PORT_STAGE_3 = 3,
180 DIG_EXT_PORT_STAGE_MAX = 4,
181};
182
183enum dm_dig_connect_e {
184 DIG_STA_DISCONNECT = 0,
185 DIG_STA_CONNECT = 1,
186 DIG_STA_BEFORE_CONNECT = 2,
187 DIG_MULTISTA_DISCONNECT = 3,
188 DIG_MULTISTA_CONNECT = 4,
189 DIG_CONNECT_MAX
190};
191
192extern struct dig_t dm_digtable;
193void rtl92c_dm_init(struct ieee80211_hw *hw);
194void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
195void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
196void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
197void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
198void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
199void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
200void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
201void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
202void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
203
204#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 11dd22b987e7..5ef91374b230 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -31,10 +31,9 @@
31#include "../wifi.h" 31#include "../wifi.h"
32#include "../pci.h" 32#include "../pci.h"
33#include "../base.h" 33#include "../base.h"
34#include "reg.h" 34#include "../rtl8192ce/reg.h"
35#include "def.h" 35#include "../rtl8192ce/def.h"
36#include "fw.h" 36#include "fw_common.h"
37#include "table.h"
38 37
39static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) 38static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
40{ 39{
@@ -133,17 +132,15 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
133{ 132{
134 struct rtl_priv *rtlpriv = rtl_priv(hw); 133 struct rtl_priv *rtlpriv = rtl_priv(hw);
135 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 134 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
136 bool is_version_b;
137 u8 *bufferPtr = (u8 *) buffer; 135 u8 *bufferPtr = (u8 *) buffer;
138 136
139 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size)); 137 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
140 138
141 is_version_b = IS_CHIP_VER_B(version); 139 if (IS_CHIP_VER_B(version)) {
142 if (is_version_b) {
143 u32 pageNums, remainSize; 140 u32 pageNums, remainSize;
144 u32 page, offset; 141 u32 page, offset;
145 142
146 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) 143 if (IS_HARDWARE_TYPE_8192CE(rtlhal))
147 _rtl92c_fill_dummy(bufferPtr, &size); 144 _rtl92c_fill_dummy(bufferPtr, &size);
148 145
149 pageNums = size / FW_8192C_PAGE_SIZE; 146 pageNums = size / FW_8192C_PAGE_SIZE;
@@ -231,14 +228,14 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
231 u32 fwsize; 228 u32 fwsize;
232 int err; 229 int err;
233 enum version_8192c version = rtlhal->version; 230 enum version_8192c version = rtlhal->version;
231 const struct firmware *firmware;
234 232
235 const struct firmware *firmware = NULL; 233 printk(KERN_INFO "rtl8192cu: Loading firmware file %s\n",
236 234 rtlpriv->cfg->fw_name);
237 err = request_firmware(&firmware, rtlpriv->cfg->fw_name, 235 err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
238 rtlpriv->io.dev); 236 rtlpriv->io.dev);
239 if (err) { 237 if (err) {
240 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 238 printk(KERN_ERR "rtl8192cu: Firmware loading failed\n");
241 ("Failed to request firmware!\n"));
242 return 1; 239 return 1;
243 } 240 }
244 241
@@ -281,6 +278,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
281 278
282 return 0; 279 return 0;
283} 280}
281EXPORT_SYMBOL(rtl92c_download_fw);
284 282
285static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) 283static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
286{ 284{
@@ -318,12 +316,12 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
318 316
319 while (true) { 317 while (true) {
320 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); 318 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
321 if (rtlhal->b_h2c_setinprogress) { 319 if (rtlhal->h2c_setinprogress) {
322 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, 320 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
323 ("H2C set in progress! Wait to set.." 321 ("H2C set in progress! Wait to set.."
324 "element_id(%d).\n", element_id)); 322 "element_id(%d).\n", element_id));
325 323
326 while (rtlhal->b_h2c_setinprogress) { 324 while (rtlhal->h2c_setinprogress) {
327 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, 325 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
328 flag); 326 flag);
329 h2c_waitcounter++; 327 h2c_waitcounter++;
@@ -339,7 +337,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
339 } 337 }
340 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); 338 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
341 } else { 339 } else {
342 rtlhal->b_h2c_setinprogress = true; 340 rtlhal->h2c_setinprogress = true;
343 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); 341 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
344 break; 342 break;
345 } 343 }
@@ -495,7 +493,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
495 } 493 }
496 494
497 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); 495 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
498 rtlhal->b_h2c_setinprogress = false; 496 rtlhal->h2c_setinprogress = false;
499 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); 497 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
500 498
501 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n")); 499 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
@@ -507,7 +505,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
507 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 505 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
508 u32 tmp_cmdbuf[2]; 506 u32 tmp_cmdbuf[2];
509 507
510 if (rtlhal->bfw_ready == false) { 508 if (rtlhal->fw_ready == false) {
511 RT_ASSERT(false, ("return H2C cmd because of Fw " 509 RT_ASSERT(false, ("return H2C cmd because of Fw "
512 "download fail!!!\n")); 510 "download fail!!!\n"));
513 return; 511 return;
@@ -519,6 +517,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
519 517
520 return; 518 return;
521} 519}
520EXPORT_SYMBOL(rtl92c_fill_h2c_cmd);
522 521
523void rtl92c_firmware_selfreset(struct ieee80211_hw *hw) 522void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
524{ 523{
@@ -539,6 +538,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
539 u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); 538 u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
540 } 539 }
541} 540}
541EXPORT_SYMBOL(rtl92c_firmware_selfreset);
542 542
543void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) 543void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
544{ 544{
@@ -559,39 +559,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
559 rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); 559 rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
560 560
561} 561}
562 562EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd);
563static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
564 struct sk_buff *skb)
565{
566 struct rtl_priv *rtlpriv = rtl_priv(hw);
567 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
568 struct rtl8192_tx_ring *ring;
569 struct rtl_tx_desc *pdesc;
570 u8 own;
571 unsigned long flags;
572 struct sk_buff *pskb = NULL;
573
574 ring = &rtlpci->tx_ring[BEACON_QUEUE];
575
576 pskb = __skb_dequeue(&ring->queue);
577 if (pskb)
578 kfree_skb(pskb);
579
580 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
581
582 pdesc = &ring->desc[0];
583 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
584
585 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
586
587 __skb_queue_tail(&ring->queue, skb);
588
589 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
590
591 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
592
593 return true;
594}
595 563
596#define BEACON_PG 0 /*->1*/ 564#define BEACON_PG 0 /*->1*/
597#define PSPOLL_PG 2 565#define PSPOLL_PG 2
@@ -776,7 +744,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
776 memcpy((u8 *) skb_put(skb, totalpacketlen), 744 memcpy((u8 *) skb_put(skb, totalpacketlen),
777 &reserved_page_packet, totalpacketlen); 745 &reserved_page_packet, totalpacketlen);
778 746
779 rtstatus = _rtl92c_cmd_send_packet(hw, skb); 747 rtstatus = rtlpriv->cfg->ops->cmd_send_packet(hw, skb);
780 748
781 if (rtstatus) 749 if (rtstatus)
782 b_dlok = true; 750 b_dlok = true;
@@ -793,6 +761,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
793 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 761 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
794 ("Set RSVD page location to Fw FAIL!!!!!!.\n")); 762 ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
795} 763}
764EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
796 765
797void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) 766void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
798{ 767{
@@ -802,3 +771,4 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
802 771
803 rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); 772 rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
804} 773}
774EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3db33bd14666..3db33bd14666 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
new file mode 100644
index 000000000000..2f624fc27499
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
@@ -0,0 +1,39 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31
32
/* Module metadata for the shared rtl8192c common code. */
MODULE_AUTHOR("lizhaoming	<chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
MODULE_AUTHOR("Georgia		<georgia@realtek.com>");
MODULE_AUTHOR("Ziv Huang	<ziv_huang@realtek.com>");
MODULE_AUTHOR("Larry Finger	<Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
new file mode 100644
index 000000000000..a70228278398
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -0,0 +1,2042 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../rtl8192ce/reg.h"
32#include "../rtl8192ce/def.h"
33#include "dm_common.h"
34#include "phy_common.h"
35
36/* Define macro to shorten lines */
37#define MCS_TXPWR mcs_txpwrlevel_origoffset
38
39u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
40{
41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 u32 returnvalue, originalvalue, bitshift;
43
44 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
45 "bitmask(%#x)\n", regaddr,
46 bitmask));
47 originalvalue = rtl_read_dword(rtlpriv, regaddr);
48 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
49 returnvalue = (originalvalue & bitmask) >> bitshift;
50
51 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
52 "Addr[0x%x]=0x%x\n", bitmask,
53 regaddr, originalvalue));
54
55 return returnvalue;
56
57}
58EXPORT_SYMBOL(rtl92c_phy_query_bb_reg);
59
60void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
61 u32 regaddr, u32 bitmask, u32 data)
62{
63 struct rtl_priv *rtlpriv = rtl_priv(hw);
64 u32 originalvalue, bitshift;
65
66 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
67 " data(%#x)\n", regaddr, bitmask,
68 data));
69
70 if (bitmask != MASKDWORD) {
71 originalvalue = rtl_read_dword(rtlpriv, regaddr);
72 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
73 data = ((originalvalue & (~bitmask)) | (data << bitshift));
74 }
75
76 rtl_write_dword(rtlpriv, regaddr, data);
77
78 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
79 " data(%#x)\n", regaddr, bitmask,
80 data));
81}
82EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
83
84u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
85 enum radio_path rfpath, u32 offset)
86{
87 RT_ASSERT(false, ("deprecated!\n"));
88 return 0;
89}
90EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
91
92void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
93 enum radio_path rfpath, u32 offset,
94 u32 data)
95{
96 RT_ASSERT(false, ("deprecated!\n"));
97}
98EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
99
/*
 * Read an RF register over the LSSI serial interface.
 *
 * The read is performed by toggling BLSSIREADEDGE on the path-A HSSI
 * parameter register around a write of the (6-bit) offset into the
 * per-path HSSI parameter register, with 1 ms settling delays between
 * steps.  The result is then fetched from either the PI or the normal
 * LSSI readback register, depending on whether PI mode is enabled for
 * the path.  Returns 0xFFFFFFFF when I/O is not possible.
 *
 * NOTE(review): the exact edge/delay sequence appears to follow a
 * hardware programming requirement — do not reorder these writes.
 */
u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
			       enum radio_path rfpath, u32 offset)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
	u32 newoffset;
	u32 tmplong, tmplong2;
	u8 rfpi_enable = 0;
	u32 retvalue;

	/* Only the low 6 bits of the offset are valid. */
	offset &= 0x3f;
	newoffset = offset;
	if (RT_CANNOT_IO(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
		return 0xFFFFFFFF;
	}
	tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
	if (rfpath == RF90_PATH_A)
		tmplong2 = tmplong;
	else
		tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
	/* Offset lives in bits [28:23] of the HSSI parameter word. */
	tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
	    (newoffset << 23) | BLSSIREADEDGE;
	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
		      tmplong & (~BLSSIREADEDGE));
	mdelay(1);
	rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
	mdelay(1);
	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
		      tmplong | BLSSIREADEDGE);
	mdelay(1);
	/* BIT(8) of the HSSI parameter1 register selects PI readback mode. */
	if (rfpath == RF90_PATH_A)
		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
						 BIT(8));
	else if (rfpath == RF90_PATH_B)
		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
						 BIT(8));
	if (rfpi_enable)
		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
					 BLSSIREADBACKDATA);
	else
		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
					 BLSSIREADBACKDATA);
	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
					       rfpath, pphyreg->rflssi_readback,
					       retvalue));
	return retvalue;
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
150
151void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
152 enum radio_path rfpath, u32 offset,
153 u32 data)
154{
155 u32 data_and_addr;
156 u32 newoffset;
157 struct rtl_priv *rtlpriv = rtl_priv(hw);
158 struct rtl_phy *rtlphy = &(rtlpriv->phy);
159 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
160
161 if (RT_CANNOT_IO(hw)) {
162 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
163 return;
164 }
165 offset &= 0x3f;
166 newoffset = offset;
167 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
168 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
169 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
170 rfpath, pphyreg->rf3wire_offset,
171 data_and_addr));
172}
173EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
174
175u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
176{
177 u32 i;
178
179 for (i = 0; i <= 31; i++) {
180 if (((bitmask >> i) & 0x1) == 1)
181 break;
182 }
183 return i;
184}
185EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
186
187static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
188{
189 rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
190 rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
191 rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
192 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
193 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
194 rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
195 rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
196 rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
197 rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
198 rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
199}
200bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
201{
202 struct rtl_priv *rtlpriv = rtl_priv(hw);
203
204 return rtlpriv->cfg->ops->phy_rf6052_config(hw);
205}
206EXPORT_SYMBOL(rtl92c_phy_rf_config);
207
208bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
209{
210 struct rtl_priv *rtlpriv = rtl_priv(hw);
211 struct rtl_phy *rtlphy = &(rtlpriv->phy);
212 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
213 bool rtstatus;
214
215 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
216 rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
217 BASEBAND_CONFIG_PHY_REG);
218 if (rtstatus != true) {
219 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
220 return false;
221 }
222 if (rtlphy->rf_type == RF_1T2R) {
223 _rtl92c_phy_bb_config_1t(hw);
224 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
225 }
226 if (rtlefuse->autoload_failflag == false) {
227 rtlphy->pwrgroup_cnt = 0;
228 rtstatus = rtlpriv->cfg->ops->config_bb_with_pgheaderfile(hw,
229 BASEBAND_CONFIG_PHY_REG);
230 }
231 if (rtstatus != true) {
232 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
233 return false;
234 }
235 rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
236 BASEBAND_CONFIG_AGC_TAB);
237 if (rtstatus != true) {
238 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
239 return false;
240 }
241 rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
242 RFPGA0_XA_HSSIPARAMETER2,
243 0x200));
244 return true;
245}
246EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile);
247
/*
 * Capture one TX-AGC register value written by the power-group (PG)
 * parameter file into rtlphy->mcs_txpwrlevel_origoffset (via the
 * MCS_TXPWR shorthand macro).  Each recognised register address maps to
 * a fixed column of the current power group's row; the row counter
 * rtlphy->pwrgroup_cnt only advances when the last register of a group
 * (RTXAGC_B_MCS15_MCS12) is seen.
 *
 * RTXAGC_B_CCK11_A_CCK2_11 is shared between two columns and is
 * disambiguated by the bitmask used in the PG file.
 */
void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
					    u32 regaddr, u32 bitmask,
					    u32 data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	if (regaddr == RTXAGC_A_RATE18_06) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0]));
	}
	if (regaddr == RTXAGC_A_RATE54_24) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1]));
	}
	if (regaddr == RTXAGC_A_CCK1_MCS32) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6]));
	}
	/* Shared address: high-byte mask selects column 7 (path A). */
	if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7]));
	}
	if (regaddr == RTXAGC_A_MCS03_MCS00) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2]));
	}
	if (regaddr == RTXAGC_A_MCS07_MCS04) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3]));
	}
	if (regaddr == RTXAGC_A_MCS11_MCS08) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4]));
	}
	if (regaddr == RTXAGC_A_MCS15_MCS12) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5]));
	}
	if (regaddr == RTXAGC_B_RATE18_06) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8]));
	}
	if (regaddr == RTXAGC_B_RATE54_24) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data;

		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9]));
	}

	if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data;

		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14]));
	}

	/* Shared address: low-byte mask selects column 15 (path B). */
	if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data;

		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15]));
	}

	if (regaddr == RTXAGC_B_MCS03_MCS00) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data;

		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10]));
	}

	if (regaddr == RTXAGC_B_MCS07_MCS04) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data;

		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11]));
	}

	if (regaddr == RTXAGC_B_MCS11_MCS08) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data;

		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12]));
	}

	if (regaddr == RTXAGC_B_MCS15_MCS12) {
		rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data;

		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
			  rtlphy->pwrgroup_cnt,
			  rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13]));

		/* Last register of a power group: move to the next row. */
		rtlphy->pwrgroup_cnt++;
	}
}
EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset);
384
385void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
386{
387 struct rtl_priv *rtlpriv = rtl_priv(hw);
388 struct rtl_phy *rtlphy = &(rtlpriv->phy);
389
390 rtlphy->default_initialgain[0] =
391 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
392 rtlphy->default_initialgain[1] =
393 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
394 rtlphy->default_initialgain[2] =
395 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
396 rtlphy->default_initialgain[3] =
397 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
398
399 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
400 ("Default initial gain (c50=0x%x, "
401 "c58=0x%x, c60=0x%x, c68=0x%x\n",
402 rtlphy->default_initialgain[0],
403 rtlphy->default_initialgain[1],
404 rtlphy->default_initialgain[2],
405 rtlphy->default_initialgain[3]));
406
407 rtlphy->framesync = (u8) rtl_get_bbreg(hw,
408 ROFDM0_RXDETECTOR3, MASKBYTE0);
409 rtlphy->framesync_c34 = rtl_get_bbreg(hw,
410 ROFDM0_RXDETECTOR2, MASKDWORD);
411
412 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
413 ("Default framesync (0x%x) = 0x%x\n",
414 ROFDM0_RXDETECTOR3, rtlphy->framesync));
415}
416
/*
 * Populate the per-RF-path register map (rtlphy->phyreg_def[]) with the
 * fixed 92C baseband register addresses.  This is a pure table fill —
 * no hardware access.  Paths A/B have dedicated registers for most
 * entries; C/D share the A/B or CD variants where the hardware only
 * provides combined registers.
 */
void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	/* RF interface software control / readback / output / enable */
	rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;

	/* 3-wire serial write ports (see _rtl92c_phy_rf_serial_write) */
	rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
	    RFPGA0_XA_LSSIPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
	    RFPGA0_XB_LSSIPARAMETER;

	rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;

	rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;

	/* HSSI parameter registers (used by the LSSI serial read path) */
	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;

	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;

	rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
	    RFPGA0_XAB_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
	    RFPGA0_XAB_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
	    RFPGA0_XCD_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
	    RFPGA0_XCD_SWITCHCONTROL;

	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;

	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;

	/* RX/TX IQ-imbalance and AFE registers */
	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
	    ROFDM0_XARXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
	    ROFDM0_XBRXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
	    ROFDM0_XCRXIQIMBANLANCE;
	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
	    ROFDM0_XDRXIQIMBALANCE;

	rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
	rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
	rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
	rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;

	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
	    ROFDM0_XATXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
	    ROFDM0_XBTXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
	    ROFDM0_XCTXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
	    ROFDM0_XDTXIQIMBALANCE;

	rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
	rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
	rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
	rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;

	/* LSSI readback ports (normal and PI mode) */
	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
	    RFPGA0_XA_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
	    RFPGA0_XB_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
	    RFPGA0_XC_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
	    RFPGA0_XD_LSSIREADBACK;

	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
	    TRANSCEIVEA_HSPI_READBACK;
	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
	    TRANSCEIVEB_HSPI_READBACK;

}
EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
522
523void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
524{
525 struct rtl_priv *rtlpriv = rtl_priv(hw);
526 struct rtl_phy *rtlphy = &(rtlpriv->phy);
527 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
528 u8 txpwr_level;
529 long txpwr_dbm;
530
531 txpwr_level = rtlphy->cur_cck_txpwridx;
532 txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
533 WIRELESS_MODE_B, txpwr_level);
534 txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
535 rtlefuse->legacy_ht_txpowerdiff;
536 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
537 WIRELESS_MODE_G,
538 txpwr_level) > txpwr_dbm)
539 txpwr_dbm =
540 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
541 txpwr_level);
542 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
543 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
544 WIRELESS_MODE_N_24G,
545 txpwr_level) > txpwr_dbm)
546 txpwr_dbm =
547 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
548 txpwr_level);
549 *powerlevel = txpwr_dbm;
550}
551
552static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
553 u8 *cckpowerlevel, u8 *ofdmpowerlevel)
554{
555 struct rtl_priv *rtlpriv = rtl_priv(hw);
556 struct rtl_phy *rtlphy = &(rtlpriv->phy);
557 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
558 u8 index = (channel - 1);
559
560 cckpowerlevel[RF90_PATH_A] =
561 rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
562 cckpowerlevel[RF90_PATH_B] =
563 rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
564 if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
565 ofdmpowerlevel[RF90_PATH_A] =
566 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
567 ofdmpowerlevel[RF90_PATH_B] =
568 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
569 } else if (get_rf_type(rtlphy) == RF_2T2R) {
570 ofdmpowerlevel[RF90_PATH_A] =
571 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
572 ofdmpowerlevel[RF90_PATH_B] =
573 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
574 }
575}
576
577static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
578 u8 channel, u8 *cckpowerlevel,
579 u8 *ofdmpowerlevel)
580{
581 struct rtl_priv *rtlpriv = rtl_priv(hw);
582 struct rtl_phy *rtlphy = &(rtlpriv->phy);
583
584 rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
585 rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
586}
587
588void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
589{
590 struct rtl_priv *rtlpriv = rtl_priv(hw);
591 struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
592 u8 cckpowerlevel[2], ofdmpowerlevel[2];
593
594 if (rtlefuse->txpwr_fromeprom == false)
595 return;
596 _rtl92c_get_txpower_index(hw, channel,
597 &cckpowerlevel[0], &ofdmpowerlevel[0]);
598 _rtl92c_ccxpower_index_check(hw,
599 channel, &cckpowerlevel[0],
600 &ofdmpowerlevel[0]);
601 rtlpriv->cfg->ops->phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
602 rtlpriv->cfg->ops->phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0],
603 channel);
604}
605EXPORT_SYMBOL(rtl92c_phy_set_txpower_level);
606
607bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
608{
609 struct rtl_priv *rtlpriv = rtl_priv(hw);
610 struct rtl_phy *rtlphy = &(rtlpriv->phy);
611 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
612 u8 idx;
613 u8 rf_path;
614
615 u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
616 WIRELESS_MODE_B,
617 power_indbm);
618 u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
619 WIRELESS_MODE_N_24G,
620 power_indbm);
621 if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
622 ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
623 else
624 ofdmtxpwridx = 0;
625 RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
626 ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
627 power_indbm, ccktxpwridx, ofdmtxpwridx));
628 for (idx = 0; idx < 14; idx++) {
629 for (rf_path = 0; rf_path < 2; rf_path++) {
630 rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
631 rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
632 ofdmtxpwridx;
633 rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
634 ofdmtxpwridx;
635 }
636 }
637 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
638 return true;
639}
640EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm);
641
/*
 * Intentionally empty: this common layer does not program the beacon
 * interval.  NOTE(review): presumably handled by the chip-specific
 * code — confirm against the 92ce/92cu HAL before relying on it.
 */
void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
{
}
EXPORT_SYMBOL(rtl92c_phy_set_beacon_hw_reg);
646
647u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
648 enum wireless_mode wirelessmode,
649 long power_indbm)
650{
651 u8 txpwridx;
652 long offset;
653
654 switch (wirelessmode) {
655 case WIRELESS_MODE_B:
656 offset = -7;
657 break;
658 case WIRELESS_MODE_G:
659 case WIRELESS_MODE_N_24G:
660 offset = -8;
661 break;
662 default:
663 offset = -8;
664 break;
665 }
666
667 if ((power_indbm - offset) > 0)
668 txpwridx = (u8) ((power_indbm - offset) * 2);
669 else
670 txpwridx = 0;
671
672 if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
673 txpwridx = MAX_TXPWR_IDX_NMODE_92S;
674
675 return txpwridx;
676}
677EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_Idx);
678
679long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
680 enum wireless_mode wirelessmode,
681 u8 txpwridx)
682{
683 long offset;
684 long pwrout_dbm;
685
686 switch (wirelessmode) {
687 case WIRELESS_MODE_B:
688 offset = -7;
689 break;
690 case WIRELESS_MODE_G:
691 case WIRELESS_MODE_N_24G:
692 offset = -8;
693 break;
694 default:
695 offset = -8;
696 break;
697 }
698 pwrout_dbm = txpwridx / 2 + offset;
699 return pwrout_dbm;
700}
701EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
702
703void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
704{
705 struct rtl_priv *rtlpriv = rtl_priv(hw);
706 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
707 enum io_type iotype;
708
709 if (!is_hal_stop(rtlhal)) {
710 switch (operation) {
711 case SCAN_OPT_BACKUP:
712 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
713 rtlpriv->cfg->ops->set_hw_reg(hw,
714 HW_VAR_IO_CMD,
715 (u8 *)&iotype);
716
717 break;
718 case SCAN_OPT_RESTORE:
719 iotype = IO_CMD_RESUME_DM_BY_SCAN;
720 rtlpriv->cfg->ops->set_hw_reg(hw,
721 HW_VAR_IO_CMD,
722 (u8 *)&iotype);
723 break;
724 default:
725 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
726 ("Unknown Scan Backup operation.\n"));
727 break;
728 }
729 }
730}
731EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
732
733void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
734 enum nl80211_channel_type ch_type)
735{
736 struct rtl_priv *rtlpriv = rtl_priv(hw);
737 struct rtl_phy *rtlphy = &(rtlpriv->phy);
738 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
739 u8 tmp_bw = rtlphy->current_chan_bw;
740
741 if (rtlphy->set_bwmode_inprogress)
742 return;
743 rtlphy->set_bwmode_inprogress = true;
744 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
745 rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
746 else {
747 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
748 ("FALSE driver sleep or unload\n"));
749 rtlphy->set_bwmode_inprogress = false;
750 rtlphy->current_chan_bw = tmp_bw;
751 }
752}
753EXPORT_SYMBOL(rtl92c_phy_set_bw_mode);
754
755void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
756{
757 struct rtl_priv *rtlpriv = rtl_priv(hw);
758 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
759 struct rtl_phy *rtlphy = &(rtlpriv->phy);
760 u32 delay;
761
762 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
763 ("switch to channel%d\n", rtlphy->current_channel));
764 if (is_hal_stop(rtlhal))
765 return;
766 do {
767 if (!rtlphy->sw_chnl_inprogress)
768 break;
769 if (!_rtl92c_phy_sw_chnl_step_by_step
770 (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
771 &rtlphy->sw_chnl_step, &delay)) {
772 if (delay > 0)
773 mdelay(delay);
774 else
775 continue;
776 } else
777 rtlphy->sw_chnl_inprogress = false;
778 break;
779 } while (true);
780 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
781}
782EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
783
784u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
785{
786 struct rtl_priv *rtlpriv = rtl_priv(hw);
787 struct rtl_phy *rtlphy = &(rtlpriv->phy);
788 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
789
790 if (rtlphy->sw_chnl_inprogress)
791 return 0;
792 if (rtlphy->set_bwmode_inprogress)
793 return 0;
794 RT_ASSERT((rtlphy->current_channel <= 14),
795 ("WIRELESS_MODE_G but channel>14"));
796 rtlphy->sw_chnl_inprogress = true;
797 rtlphy->sw_chnl_stage = 0;
798 rtlphy->sw_chnl_step = 0;
799 if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
800 rtl92c_phy_sw_chnl_callback(hw);
801 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
802 ("sw_chnl_inprogress false schdule workitem\n"));
803 rtlphy->sw_chnl_inprogress = false;
804 } else {
805 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
806 ("sw_chnl_inprogress false driver sleep or"
807 " unload\n"));
808 rtlphy->sw_chnl_inprogress = false;
809 }
810 return 1;
811}
812EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
813
/*
 * Run one step of the software channel-switch state machine.
 *
 * Three command tables are (re)built on every call:
 *   stage 0: pre-common commands  (set TX power, then CMDID_END)
 *   stage 1: RF-dependent commands (write channel to RF_CHNLBW, CMDID_END)
 *   stage 2: post-common commands (CMDID_END only)
 *
 * The command addressed by *stage / *step is executed.  On CMDID_END the
 * machine advances to the next stage (or returns true once stage 2 is
 * done).  Otherwise the command's ms delay is stored in *delay, *step is
 * incremented and false is returned so the caller can re-invoke after
 * the delay.
 */
static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
					     u8 channel, u8 *stage, u8 *step,
					     u32 *delay)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
	u32 precommoncmdcnt;
	struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
	u32 postcommoncmdcnt;
	struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
	u32 rfdependcmdcnt;
	struct swchnlcmd *currentcmd = NULL;
	u8 rfpath;
	u8 num_total_rfpath = rtlphy->num_total_rfpath;

	/* Stage 0 table: adjust TX power for the new channel. */
	precommoncmdcnt = 0;
	_rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
					 MAX_PRECMD_CNT,
					 CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
	_rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
					 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);

	/* Stage 2 table: nothing left to do after the RF write. */
	postcommoncmdcnt = 0;

	_rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
					 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);

	/* Stage 1 table: program the channel into RF_CHNLBW with a 10 ms
	 * post-delay. */
	rfdependcmdcnt = 0;

	RT_ASSERT((channel >= 1 && channel <= 14),
		  ("illegal channel for Zebra: %d\n", channel));

	_rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
					 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
					 RF_CHNLBW, channel, 10);

	_rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
					 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
					 0);

	do {
		/* *stage is only ever driven by this function and starts at
		 * 0, so it stays within 0..2; any other value would leave
		 * currentcmd NULL and crash below. */
		switch (*stage) {
		case 0:
			currentcmd = &precommoncmd[*step];
			break;
		case 1:
			currentcmd = &rfdependcmd[*step];
			break;
		case 2:
			currentcmd = &postcommoncmd[*step];
			break;
		}

		if (currentcmd->cmdid == CMDID_END) {
			if ((*stage) == 2) {
				return true;	/* all stages finished */
			} else {
				(*stage)++;
				(*step) = 0;
				continue;
			}
		}

		switch (currentcmd->cmdid) {
		case CMDID_SET_TXPOWEROWER_LEVEL:
			rtl92c_phy_set_txpower_level(hw, channel);
			break;
		case CMDID_WRITEPORT_ULONG:
			rtl_write_dword(rtlpriv, currentcmd->para1,
					currentcmd->para2);
			break;
		case CMDID_WRITEPORT_USHORT:
			rtl_write_word(rtlpriv, currentcmd->para1,
				       (u16) currentcmd->para2);
			break;
		case CMDID_WRITEPORT_UCHAR:
			rtl_write_byte(rtlpriv, currentcmd->para1,
				       (u8) currentcmd->para2);
			break;
		case CMDID_RF_WRITEREG:
			/* Update the low 10 bits (channel field) of the
			 * cached RF_CHNLBW value on every path, then write
			 * it out to the RF chip. */
			for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
				rtlphy->rfreg_chnlval[rfpath] =
				    ((rtlphy->rfreg_chnlval[rfpath] &
				      0xfffffc00) | currentcmd->para2);

				rtl_set_rfreg(hw, (enum radio_path)rfpath,
					      currentcmd->para1,
					      RFREG_OFFSET_MASK,
					      rtlphy->rfreg_chnlval[rfpath]);
			}
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 ("switch case not process\n"));
			break;
		}

		break;
	} while (true);

	(*delay) = currentcmd->msdelay;
	(*step)++;
	return false;
}
919
920static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
921 u32 cmdtableidx, u32 cmdtablesz,
922 enum swchnlcmd_id cmdid,
923 u32 para1, u32 para2, u32 msdelay)
924{
925 struct swchnlcmd *pcmd;
926
927 if (cmdtable == NULL) {
928 RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
929 return false;
930 }
931
932 if (cmdtableidx >= cmdtablesz)
933 return false;
934
935 pcmd = cmdtable + cmdtableidx;
936 pcmd->cmdid = cmdid;
937 pcmd->para1 = para1;
938 pcmd->para2 = para2;
939 pcmd->msdelay = msdelay;
940 return true;
941}
942
/*
 * Validate an RF path index.  The 92C common code accepts every path
 * unconditionally, so this is effectively a stub that always succeeds.
 */
bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
{
	return true;
}
947EXPORT_SYMBOL(rtl8192_phy_check_is_legal_rfpath);
948
/*
 * Run one IQ-calibration shot for path A (also priming path B's IQK
 * registers when config_pathb is set).
 *
 * Returns a 2-bit mask: bit 0 is set when the 0xe94/0xe9c checks pass,
 * bit 1 when the 0xea4/0xeac checks pass.
 * NOTE(review): BIT(28)/BIT(27) of 0xeac are treated as failure flags
 * and 0x142/0x42/0x132/0x36 as known-bad readings — inferred from the
 * code structure; confirm against the 92C vendor register map.
 */
static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
	u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
	u8 result = 0x00;

	/* Path-A IQK tone/setup registers. */
	rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
	rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
	rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
	rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
		      config_pathb ? 0x28160202 : 0x28160502);

	if (config_pathb) {
		rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
		rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
		rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
		rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
	}

	/* Trigger the one-shot calibration (0xe48: 0xf9... then 0xf8...). */
	rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);

	mdelay(IQK_DELAY_TIME);

	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
	reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
	reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);

	if (!(reg_eac & BIT(28)) &&
	    (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	else
		/* second check is skipped when the first one failed */
		return result;

	if (!(reg_eac & BIT(27)) &&
	    (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
	    (((reg_eac & 0x03FF0000) >> 16) != 0x36))
		result |= 0x02;
	return result;
}
991
/*
 * Run one IQ-calibration shot for path B.  Path B's setup registers are
 * pre-programmed by _rtl92c_phy_path_a_iqk(hw, true); this only pulses
 * the 0xe60 trigger and reads back the results.
 *
 * Returns a 2-bit mask: bit 0 when the 0xeb4/0xebc checks pass, bit 1
 * when the 0xec4/0xecc checks pass (same failure heuristics as path A).
 */
static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
{
	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
	u8 result = 0x00;

	/* pulse the path-B one-shot trigger */
	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
	mdelay(IQK_DELAY_TIME);
	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
	reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
	reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
	reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
	if (!(reg_eac & BIT(31)) &&
	    (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	else
		/* second check is skipped when the first one failed */
		return result;

	if (!(reg_eac & BIT(30)) &&
	    (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
	    (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
		result |= 0x02;
	return result;
}
1018
/*
 * Program path-A IQ-imbalance correction from the chosen IQK result row.
 *
 * result[final_candidate][0..1] carry the 10-bit signed TX X/Y terms
 * (sign-extended via the 0x200 test); [2..3] carry the RX terms.
 * Does nothing when final_candidate is 0xFF (no usable measurement) or
 * when iqk_ok is false; btxonly skips the RX programming.
 */
static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
					       bool iqk_ok, long result[][8],
					       u8 final_candidate, bool btxonly)
{
	u32 oldval_0, x, tx0_a, reg;
	long y, tx0_c;

	if (final_candidate == 0xFF)
		return;
	else if (iqk_ok) {
		/* current TX0 "A" coefficient lives in bits 31:22 */
		oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					  MASKDWORD) >> 22) & 0x3FF;
		x = result[final_candidate][0];
		if ((x & 0x00000200) != 0)
			x = x | 0xFFFFFC00;	/* sign-extend 10-bit value */
		tx0_a = (x * oldval_0) >> 8;
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
			      ((x * oldval_0 >> 7) & 0x1));
		y = result[final_candidate][1];
		if ((y & 0x00000200) != 0)
			y = y | 0xFFFFFC00;	/* sign-extend 10-bit value */
		tx0_c = (y * oldval_0) >> 8;
		/* the "C" term is split: high 4 bits and low 6 bits go to
		 * different registers */
		rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
			      ((tx0_c & 0x3C0) >> 6));
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
			      (tx0_c & 0x3F));
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
			      ((y * oldval_0 >> 7) & 0x1));
		if (btxonly)
			return;
		/* RX correction terms */
		reg = result[final_candidate][2];
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
		reg = result[final_candidate][3] & 0x3F;
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
		reg = (result[final_candidate][3] >> 6) & 0xF;
		rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
	}
}
1058
/*
 * Program path-B IQ-imbalance correction from the chosen IQK result row.
 * Mirror of _rtl92c_phy_path_a_fill_iqk_matrix using result columns
 * [4..7] and the path-B (XB/XD) registers.
 */
static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
					       bool iqk_ok, long result[][8],
					       u8 final_candidate, bool btxonly)
{
	u32 oldval_1, x, tx1_a, reg;
	long y, tx1_c;

	if (final_candidate == 0xFF)
		return;
	else if (iqk_ok) {
		/* current TX1 "A" coefficient lives in bits 31:22 */
		oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
					  MASKDWORD) >> 22) & 0x3FF;
		x = result[final_candidate][4];
		if ((x & 0x00000200) != 0)
			x = x | 0xFFFFFC00;	/* sign-extend 10-bit value */
		tx1_a = (x * oldval_1) >> 8;
		rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
			      ((x * oldval_1 >> 7) & 0x1));
		y = result[final_candidate][5];
		if ((y & 0x00000200) != 0)
			y = y | 0xFFFFFC00;	/* sign-extend 10-bit value */
		tx1_c = (y * oldval_1) >> 8;
		/* split "C" term: high 4 bits / low 6 bits */
		rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
			      ((tx1_c & 0x3C0) >> 6));
		rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
			      (tx1_c & 0x3F));
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
			      ((y * oldval_1 >> 7) & 0x1));
		if (btxonly)
			return;
		/* RX correction terms */
		reg = result[final_candidate][6];
		rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
		reg = result[final_candidate][7] & 0x3F;
		rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
		reg = (result[final_candidate][7] >> 6) & 0xF;
		rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
	}
}
1098
1099static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
1100 u32 *addareg, u32 *addabackup,
1101 u32 registernum)
1102{
1103 u32 i;
1104
1105 for (i = 0; i < registernum; i++)
1106 addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
1107}
1108
/*
 * Back up the IQK-related MAC registers.  The first IQK_MAC_REG_NUM - 1
 * entries are byte-wide registers; the loop intentionally leaves i at
 * IQK_MAC_REG_NUM - 1 so the last entry is read as a full dword.
 */
static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
					   u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i;

	for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
		macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
	/* i == IQK_MAC_REG_NUM - 1 here: final register is dword-wide */
	macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
}
1119
1120static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
1121 u32 *addareg, u32 *addabackup,
1122 u32 regiesternum)
1123{
1124 u32 i;
1125
1126 for (i = 0; i < regiesternum; i++)
1127 rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
1128}
1129
/*
 * Restore the MAC registers saved by _rtl92c_phy_save_mac_registers():
 * byte writes for the first IQK_MAC_REG_NUM - 1 entries, then one dword
 * write for the last entry (using the loop's final i).
 */
static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
					     u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i;

	for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
		rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
	/* i == IQK_MAC_REG_NUM - 1 here: final register is dword-wide */
	rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
}
1140
1141static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
1142 u32 *addareg, bool is_patha_on, bool is2t)
1143{
1144 u32 pathOn;
1145 u32 i;
1146
1147 pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
1148 if (false == is2t) {
1149 pathOn = 0x0bdb25a0;
1150 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
1151 } else {
1152 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
1153 }
1154
1155 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
1156 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
1157}
1158
/*
 * Quiesce the MAC for calibration: write 0x3F to the first IQK MAC
 * register, clear BIT(3) in the middle byte-wide registers (values come
 * from the backup taken earlier), and clear BIT(5) in the last one.
 * The loop intentionally leaves i at IQK_MAC_REG_NUM - 1 for the final
 * write.
 */
static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
						u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i;

	rtl_write_byte(rtlpriv, macreg[0], 0x3F);

	for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
		rtl_write_byte(rtlpriv, macreg[i],
			       (u8) (macbackup[i] & (~BIT(3))));
	/* i == IQK_MAC_REG_NUM - 1: last register masks BIT(5) instead */
	rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
}
1172
/*
 * Put path A into standby between the path-A and path-B calibration
 * runs: disable the IQK block (0xe28 = 0), set 0x840 to 0x00010000,
 * then re-enable IQK (0xe28 = 0x80800000).  Register meanings inferred
 * from their use in _rtl92c_phy_iq_calibrate; confirm against the 92C
 * register map.
 */
static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
{
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
	rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
}
1179
1180static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
1181{
1182 u32 mode;
1183
1184 mode = pi_mode ? 0x01000100 : 0x01000000;
1185 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1186 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1187}
1188
/*
 * Check whether IQK result rows c1 and c2 agree.
 *
 * Entries differing by more than MAX_TOLERANCE set their bit in
 * simularity_bitmap.  Special case for the RX-register positions
 * (i == 2 for path A, i == 6 for path B): if one row's RX pair is all
 * zero, the other row is nominated as that path's final candidate
 * instead of flagging a mismatch.
 *
 * Returns true only when every entry agrees and no candidate copy was
 * needed.  When only the path-A entries (bits 0-3) agree — or, on 2T
 * parts, only the path-B entries (bits 4-7) — the agreeing entries of
 * c1 are copied into result[3] and false is returned.
 */
static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
					   long result[][8], u8 c1, u8 c2)
{
	u32 i, j, diff, simularity_bitmap, bound;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	u8 final_candidate[2] = { 0xFF, 0xFF };
	bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);

	/* 2T parts compare all 8 entries, 1T parts only path A's 4 */
	if (is2t)
		bound = 8;
	else
		bound = 4;

	simularity_bitmap = 0;

	for (i = 0; i < bound; i++) {
		diff = (result[c1][i] > result[c2][i]) ?
		    (result[c1][i] - result[c2][i]) :
		    (result[c2][i] - result[c1][i]);

		if (diff > MAX_TOLERANCE) {
			if ((i == 2 || i == 6) && !simularity_bitmap) {
				/* an all-zero RX pair nominates the other
				 * row as candidate for this path */
				if (result[c1][i] + result[c1][i + 1] == 0)
					final_candidate[(i / 4)] = c2;
				else if (result[c2][i] + result[c2][i + 1] == 0)
					final_candidate[(i / 4)] = c1;
				else
					simularity_bitmap = simularity_bitmap |
					    (1 << i);
			} else
				simularity_bitmap =
				    simularity_bitmap | (1 << i);
		}
	}

	if (simularity_bitmap == 0) {
		/* fully similar: copy any nominated candidate's RX pair
		 * into the aggregate row; true only if none was needed */
		for (i = 0; i < (bound / 4); i++) {
			if (final_candidate[i] != 0xFF) {
				for (j = i * 4; j < (i + 1) * 4 - 2; j++)
					result[3][j] =
					    result[final_candidate[i]][j];
				bresult = false;
			}
		}
		return bresult;
	} else if (!(simularity_bitmap & 0x0F)) {
		/* only path A agrees */
		for (i = 0; i < 4; i++)
			result[3][i] = result[c1][i];
		return false;
	} else if (!(simularity_bitmap & 0xF0) && is2t) {
		/* only path B agrees */
		for (i = 4; i < 8; i++)
			result[3][i] = result[c1][i];
		return false;
	} else {
		return false;
	}

}
1248
1249static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
1250 long result[][8], u8 t, bool is2t)
1251{
1252 struct rtl_priv *rtlpriv = rtl_priv(hw);
1253 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1254 u32 i;
1255 u8 patha_ok, pathb_ok;
1256 u32 adda_reg[IQK_ADDA_REG_NUM] = {
1257 0x85c, 0xe6c, 0xe70, 0xe74,
1258 0xe78, 0xe7c, 0xe80, 0xe84,
1259 0xe88, 0xe8c, 0xed0, 0xed4,
1260 0xed8, 0xedc, 0xee0, 0xeec
1261 };
1262
1263 u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
1264 0x522, 0x550, 0x551, 0x040
1265 };
1266
1267 const u32 retrycount = 2;
1268
1269 u32 bbvalue;
1270
1271 if (t == 0) {
1272 bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
1273
1274 _rtl92c_phy_save_adda_registers(hw, adda_reg,
1275 rtlphy->adda_backup, 16);
1276 _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
1277 rtlphy->iqk_mac_backup);
1278 }
1279 _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
1280 if (t == 0) {
1281 rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
1282 RFPGA0_XA_HSSIPARAMETER1,
1283 BIT(8));
1284 }
1285 if (!rtlphy->rfpi_enable)
1286 _rtl92c_phy_pi_mode_switch(hw, true);
1287 if (t == 0) {
1288 rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
1289 rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
1290 rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
1291 }
1292 rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
1293 rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
1294 rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
1295 if (is2t) {
1296 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1297 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
1298 }
1299 _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
1300 rtlphy->iqk_mac_backup);
1301 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
1302 if (is2t)
1303 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
1304 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1305 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
1306 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
1307 for (i = 0; i < retrycount; i++) {
1308 patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
1309 if (patha_ok == 0x03) {
1310 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1311 0x3FF0000) >> 16;
1312 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1313 0x3FF0000) >> 16;
1314 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
1315 0x3FF0000) >> 16;
1316 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
1317 0x3FF0000) >> 16;
1318 break;
1319 } else if (i == (retrycount - 1) && patha_ok == 0x01)
1320 result[t][0] = (rtl_get_bbreg(hw, 0xe94,
1321 MASKDWORD) & 0x3FF0000) >>
1322 16;
1323 result[t][1] =
1324 (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
1325
1326 }
1327
1328 if (is2t) {
1329 _rtl92c_phy_path_a_standby(hw);
1330 _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
1331 for (i = 0; i < retrycount; i++) {
1332 pathb_ok = _rtl92c_phy_path_b_iqk(hw);
1333 if (pathb_ok == 0x03) {
1334 result[t][4] = (rtl_get_bbreg(hw,
1335 0xeb4,
1336 MASKDWORD) &
1337 0x3FF0000) >> 16;
1338 result[t][5] =
1339 (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1340 0x3FF0000) >> 16;
1341 result[t][6] =
1342 (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
1343 0x3FF0000) >> 16;
1344 result[t][7] =
1345 (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
1346 0x3FF0000) >> 16;
1347 break;
1348 } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
1349 result[t][4] = (rtl_get_bbreg(hw,
1350 0xeb4,
1351 MASKDWORD) &
1352 0x3FF0000) >> 16;
1353 }
1354 result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1355 0x3FF0000) >> 16;
1356 }
1357 }
1358 rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
1359 rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
1360 rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
1361 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
1362 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
1363 if (is2t)
1364 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
1365 if (t != 0) {
1366 if (!rtlphy->rfpi_enable)
1367 _rtl92c_phy_pi_mode_switch(hw, false);
1368 _rtl92c_phy_reload_adda_registers(hw, adda_reg,
1369 rtlphy->adda_backup, 16);
1370 _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
1371 rtlphy->iqk_mac_backup);
1372 }
1373}
1374
1375static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1376 char delta, bool is2t)
1377{
1378 /* This routine is deliberately dummied out for later fixes */
1379#if 0
1380 struct rtl_priv *rtlpriv = rtl_priv(hw);
1381 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1382 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1383
1384 u32 reg_d[PATH_NUM];
1385 u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
1386
1387 u32 bb_backup[APK_BB_REG_NUM];
1388 u32 bb_reg[APK_BB_REG_NUM] = {
1389 0x904, 0xc04, 0x800, 0xc08, 0x874
1390 };
1391 u32 bb_ap_mode[APK_BB_REG_NUM] = {
1392 0x00000020, 0x00a05430, 0x02040000,
1393 0x000800e4, 0x00204000
1394 };
1395 u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
1396 0x00000020, 0x00a05430, 0x02040000,
1397 0x000800e4, 0x22204000
1398 };
1399
1400 u32 afe_backup[APK_AFE_REG_NUM];
1401 u32 afe_reg[APK_AFE_REG_NUM] = {
1402 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
1403 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
1404 0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
1405 0xeec
1406 };
1407
1408 u32 mac_backup[IQK_MAC_REG_NUM];
1409 u32 mac_reg[IQK_MAC_REG_NUM] = {
1410 0x522, 0x550, 0x551, 0x040
1411 };
1412
1413 u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1414 {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
1415 {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
1416 };
1417
1418 u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1419 {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
1420 {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
1421 };
1422
1423 u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1424 {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
1425 {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
1426 };
1427
1428 u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1429 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
1430 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
1431 };
1432
1433 u32 afe_on_off[PATH_NUM] = {
1434 0x04db25a4, 0x0b1b25a4
1435 };
1436
1437 u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
1438
1439 u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
1440
1441 u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
1442
1443 u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
1444
1445 const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
1446 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1447 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1448 {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1449 {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1450 {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
1451 };
1452
1453 const u32 apk_normal_setting_value_1[13] = {
1454 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
1455 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
1456 0x12680000, 0x00880000, 0x00880000
1457 };
1458
1459 const u32 apk_normal_setting_value_2[16] = {
1460 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
1461 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
1462 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
1463 0x00050006
1464 };
1465
1466 const u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
1467
1468 long bb_offset, delta_v, delta_offset;
1469
1470 if (!is2t)
1471 pathbound = 1;
1472
1473 for (index = 0; index < PATH_NUM; index++) {
1474 apk_offset[index] = apk_normal_offset[index];
1475 apk_value[index] = apk_normal_value[index];
1476 afe_on_off[index] = 0x6fdb25a4;
1477 }
1478
1479 for (index = 0; index < APK_BB_REG_NUM; index++) {
1480 for (path = 0; path < pathbound; path++) {
1481 apk_rf_init_value[path][index] =
1482 apk_normal_rf_init_value[path][index];
1483 apk_rf_value_0[path][index] =
1484 apk_normal_rf_value_0[path][index];
1485 }
1486 bb_ap_mode[index] = bb_normal_ap_mode[index];
1487
1488 apkbound = 6;
1489 }
1490
1491 for (index = 0; index < APK_BB_REG_NUM; index++) {
1492 if (index == 0)
1493 continue;
1494 bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
1495 }
1496
1497 _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
1498
1499 _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
1500
1501 for (path = 0; path < pathbound; path++) {
1502 if (path == RF90_PATH_A) {
1503 offset = 0xb00;
1504 for (index = 0; index < 11; index++) {
1505 rtl_set_bbreg(hw, offset, MASKDWORD,
1506 apk_normal_setting_value_1
1507 [index]);
1508
1509 offset += 0x04;
1510 }
1511
1512 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
1513
1514 offset = 0xb68;
1515 for (; index < 13; index++) {
1516 rtl_set_bbreg(hw, offset, MASKDWORD,
1517 apk_normal_setting_value_1
1518 [index]);
1519
1520 offset += 0x04;
1521 }
1522
1523 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
1524
1525 offset = 0xb00;
1526 for (index = 0; index < 16; index++) {
1527 rtl_set_bbreg(hw, offset, MASKDWORD,
1528 apk_normal_setting_value_2
1529 [index]);
1530
1531 offset += 0x04;
1532 }
1533 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
1534 } else if (path == RF90_PATH_B) {
1535 offset = 0xb70;
1536 for (index = 0; index < 10; index++) {
1537 rtl_set_bbreg(hw, offset, MASKDWORD,
1538 apk_normal_setting_value_1
1539 [index]);
1540
1541 offset += 0x04;
1542 }
1543 rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
1544 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
1545
1546 offset = 0xb68;
1547 index = 11;
1548 for (; index < 13; index++) {
1549 rtl_set_bbreg(hw, offset, MASKDWORD,
1550 apk_normal_setting_value_1
1551 [index]);
1552
1553 offset += 0x04;
1554 }
1555
1556 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
1557
1558 offset = 0xb60;
1559 for (index = 0; index < 16; index++) {
1560 rtl_set_bbreg(hw, offset, MASKDWORD,
1561 apk_normal_setting_value_2
1562 [index]);
1563
1564 offset += 0x04;
1565 }
1566 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
1567 }
1568
1569 reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
1570 0xd, MASKDWORD);
1571
1572 for (index = 0; index < APK_AFE_REG_NUM; index++)
1573 rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
1574 afe_on_off[path]);
1575
1576 if (path == RF90_PATH_A) {
1577 for (index = 0; index < APK_BB_REG_NUM; index++) {
1578 if (index == 0)
1579 continue;
1580 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
1581 bb_ap_mode[index]);
1582 }
1583 }
1584
1585 _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
1586
1587 if (path == 0) {
1588 rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
1589 } else {
1590 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
1591 0x10000);
1592 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
1593 0x1000f);
1594 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
1595 0x20103);
1596 }
1597
1598 delta_offset = ((delta + 14) / 2);
1599 if (delta_offset < 0)
1600 delta_offset = 0;
1601 else if (delta_offset > 12)
1602 delta_offset = 12;
1603
1604 for (index = 0; index < APK_BB_REG_NUM; index++) {
1605 if (index != 1)
1606 continue;
1607
1608 tmpreg = apk_rf_init_value[path][index];
1609
1610 if (!rtlefuse->apk_thermalmeterignore) {
1611 bb_offset = (tmpreg & 0xF0000) >> 16;
1612
1613 if (!(tmpreg & BIT(15)))
1614 bb_offset = -bb_offset;
1615
1616 delta_v =
1617 apk_delta_mapping[index][delta_offset];
1618
1619 bb_offset += delta_v;
1620
1621 if (bb_offset < 0) {
1622 tmpreg = tmpreg & (~BIT(15));
1623 bb_offset = -bb_offset;
1624 } else {
1625 tmpreg = tmpreg | BIT(15);
1626 }
1627
1628 tmpreg =
1629 (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
1630 }
1631
1632 rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
1633 MASKDWORD, 0x8992e);
1634 rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
1635 MASKDWORD, apk_rf_value_0[path][index]);
1636 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
1637 MASKDWORD, tmpreg);
1638
1639 i = 0;
1640 do {
1641 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
1642 rtl_set_bbreg(hw, apk_offset[path],
1643 MASKDWORD, apk_value[0]);
1644 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1645 ("PHY_APCalibrate() offset 0x%x "
1646 "value 0x%x\n",
1647 apk_offset[path],
1648 rtl_get_bbreg(hw, apk_offset[path],
1649 MASKDWORD)));
1650
1651 mdelay(3);
1652
1653 rtl_set_bbreg(hw, apk_offset[path],
1654 MASKDWORD, apk_value[1]);
1655 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1656 ("PHY_APCalibrate() offset 0x%x "
1657 "value 0x%x\n",
1658 apk_offset[path],
1659 rtl_get_bbreg(hw, apk_offset[path],
1660 MASKDWORD)));
1661
1662 mdelay(20);
1663
1664 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
1665
1666 if (path == RF90_PATH_A)
1667 tmpreg = rtl_get_bbreg(hw, 0xbd8,
1668 0x03E00000);
1669 else
1670 tmpreg = rtl_get_bbreg(hw, 0xbd8,
1671 0xF8000000);
1672
1673 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1674 ("PHY_APCalibrate() offset "
1675 "0xbd8[25:21] %x\n", tmpreg));
1676
1677 i++;
1678
1679 } while (tmpreg > apkbound && i < 4);
1680
1681 apk_result[path][index] = tmpreg;
1682 }
1683 }
1684
1685 _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
1686
1687 for (index = 0; index < APK_BB_REG_NUM; index++) {
1688 if (index == 0)
1689 continue;
1690 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
1691 }
1692
1693 _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
1694
1695 for (path = 0; path < pathbound; path++) {
1696 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
1697 MASKDWORD, reg_d[path]);
1698
1699 if (path == RF90_PATH_B) {
1700 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
1701 0x1000f);
1702 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
1703 0x20101);
1704 }
1705
1706 if (apk_result[path][1] > 6)
1707 apk_result[path][1] = 6;
1708 }
1709
1710 for (path = 0; path < pathbound; path++) {
1711 rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
1712 ((apk_result[path][1] << 15) |
1713 (apk_result[path][1] << 10) |
1714 (apk_result[path][1] << 5) |
1715 apk_result[path][1]));
1716
1717 if (path == RF90_PATH_A)
1718 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
1719 ((apk_result[path][1] << 15) |
1720 (apk_result[path][1] << 10) |
1721 (0x00 << 5) | 0x05));
1722 else
1723 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
1724 ((apk_result[path][1] << 15) |
1725 (apk_result[path][1] << 10) |
1726 (0x02 << 5) | 0x05));
1727
1728 rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
1729 ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
1730 0x08));
1731
1732 }
1733
1734 rtlphy->apk_done = true;
1735#endif
1736}
1737
1738static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
1739 bool bmain, bool is2t)
1740{
1741 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1742
1743 if (is_hal_stop(rtlhal)) {
1744 rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
1745 rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
1746 }
1747 if (is2t) {
1748 if (bmain)
1749 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
1750 BIT(5) | BIT(6), 0x1);
1751 else
1752 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
1753 BIT(5) | BIT(6), 0x2);
1754 } else {
1755 if (bmain)
1756 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
1757 else
1758 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
1759
1760 }
1761}
1762
1763#undef IQK_ADDA_REG_NUM
1764#undef IQK_DELAY_TIME
1765
/*
 * TX/RX IQ calibration entry point.
 *
 * @recovery: when true, just restore the BB registers saved at the end
 *            of the previous calibration and return.
 *
 * Otherwise up to three calibration passes are run; the first pair of
 * passes whose results agree (per _rtl92c_phy_simularity_compare)
 * selects the result row used to program the IQ-imbalance correction
 * registers, and the touched BB registers are saved for future
 * recovery calls.
 */
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	long result[4][8];
	u8 i, final_candidate;
	bool patha_ok, pathb_ok;
	long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
	    reg_ecc, reg_tmp = 0;
	bool is12simular, is13simular, is23simular;
	/* test modes are hard-wired off in this driver */
	bool start_conttx = false, singletone = false;
	u32 iqk_bb_reg[10] = {
		ROFDM0_XARXIQIMBALANCE,
		ROFDM0_XBRXIQIMBALANCE,
		ROFDM0_ECCATHRESHOLD,
		ROFDM0_AGCRSSITABLE,
		ROFDM0_XATXIQIMBALANCE,
		ROFDM0_XBTXIQIMBALANCE,
		ROFDM0_XCTXIQIMBALANCE,
		ROFDM0_XCTXAFE,
		ROFDM0_XDTXAFE,
		ROFDM0_RXIQEXTANTA
	};

	if (recovery) {
		_rtl92c_phy_reload_adda_registers(hw,
						  iqk_bb_reg,
						  rtlphy->iqk_bb_backup, 10);
		return;
	}
	if (start_conttx || singletone)
		return;
	for (i = 0; i < 8; i++) {
		result[0][i] = 0;
		result[1][i] = 0;
		result[2][i] = 0;
		result[3][i] = 0;
	}
	final_candidate = 0xff;
	patha_ok = false;
	pathb_ok = false;
	is12simular = false;
	is23simular = false;
	is13simular = false;
	for (i = 0; i < 3; i++) {
		/* 92C-series parts calibrate both paths (is2t == true) */
		if (IS_92C_SERIAL(rtlhal->version))
			_rtl92c_phy_iq_calibrate(hw, result, i, true);
		else
			_rtl92c_phy_iq_calibrate(hw, result, i, false);
		if (i == 1) {
			is12simular = _rtl92c_phy_simularity_compare(hw,
								     result, 0,
								     1);
			if (is12simular) {
				final_candidate = 0;
				break;
			}
		}
		if (i == 2) {
			is13simular = _rtl92c_phy_simularity_compare(hw,
								     result, 0,
								     2);
			if (is13simular) {
				final_candidate = 0;
				break;
			}
			is23simular = _rtl92c_phy_simularity_compare(hw,
								     result, 1,
								     2);
			if (is23simular)
				final_candidate = 1;
			else {
				/* NOTE(review): this inner loop reuses the
				 * outer counter 'i'; harmless only because
				 * it runs on the final iteration (i == 2). */
				for (i = 0; i < 8; i++)
					reg_tmp += result[3][i];

				/* row 3 holds the aggregate written by the
				 * similarity compare; use it if non-empty */
				if (reg_tmp != 0)
					final_candidate = 3;
				else
					final_candidate = 0xFF;
			}
		}
	}
	/* NOTE(review): only the last row's (i == 3) values survive this
	 * loop; earlier iterations are overwritten.  They are replaced
	 * below anyway whenever final_candidate is valid. */
	for (i = 0; i < 4; i++) {
		reg_e94 = result[i][0];
		reg_e9c = result[i][1];
		reg_ea4 = result[i][2];
		reg_eac = result[i][3];
		reg_eb4 = result[i][4];
		reg_ebc = result[i][5];
		reg_ec4 = result[i][6];
		reg_ecc = result[i][7];
	}
	if (final_candidate != 0xff) {
		rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
		rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
		reg_ea4 = result[final_candidate][2];
		reg_eac = result[final_candidate][3];
		rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
		rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
		reg_ec4 = result[final_candidate][6];
		reg_ecc = result[final_candidate][7];
		patha_ok = pathb_ok = true;
	} else {
		/* no usable measurement: fall back to unity correction */
		rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
		rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
	}
	if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
		_rtl92c_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
						   final_candidate,
						   (reg_ea4 == 0));
	if (IS_92C_SERIAL(rtlhal->version)) {
		if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
			_rtl92c_phy_path_b_fill_iqk_matrix(hw, pathb_ok,
							   result,
							   final_candidate,
							   (reg_ec4 == 0));
	}
	/* snapshot the affected BB registers for later recovery calls */
	_rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
					rtlphy->iqk_bb_backup, 10);
}
1888EXPORT_SYMBOL(rtl92c_phy_iq_calibrate);
1889
1890void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
1891{
1892 struct rtl_priv *rtlpriv = rtl_priv(hw);
1893 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1894 bool start_conttx = false, singletone = false;
1895
1896 if (start_conttx || singletone)
1897 return;
1898 if (IS_92C_SERIAL(rtlhal->version))
1899 rtlpriv->cfg->ops->phy_lc_calibrate(hw, true);
1900 else
1901 rtlpriv->cfg->ops->phy_lc_calibrate(hw, false);
1902}
1903EXPORT_SYMBOL(rtl92c_phy_lc_calibrate);
1904
1905void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
1906{
1907 struct rtl_priv *rtlpriv = rtl_priv(hw);
1908 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1909 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1910
1911 if (rtlphy->apk_done)
1912 return;
1913 if (IS_92C_SERIAL(rtlhal->version))
1914 _rtl92c_phy_ap_calibrate(hw, delta, true);
1915 else
1916 _rtl92c_phy_ap_calibrate(hw, delta, false);
1917}
1918EXPORT_SYMBOL(rtl92c_phy_ap_calibrate);
1919
1920void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
1921{
1922 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1923
1924 if (IS_92C_SERIAL(rtlhal->version))
1925 _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
1926 else
1927 _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
1928}
1929EXPORT_SYMBOL(rtl92c_phy_set_rfpath_switch);
1930
1931bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
1932{
1933 struct rtl_priv *rtlpriv = rtl_priv(hw);
1934 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1935 bool postprocessing = false;
1936
1937 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1938 ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
1939 iotype, rtlphy->set_io_inprogress));
1940 do {
1941 switch (iotype) {
1942 case IO_CMD_RESUME_DM_BY_SCAN:
1943 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1944 ("[IO CMD] Resume DM after scan.\n"));
1945 postprocessing = true;
1946 break;
1947 case IO_CMD_PAUSE_DM_BY_SCAN:
1948 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1949 ("[IO CMD] Pause DM before scan.\n"));
1950 postprocessing = true;
1951 break;
1952 default:
1953 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1954 ("switch case not process\n"));
1955 break;
1956 }
1957 } while (false);
1958 if (postprocessing && !rtlphy->set_io_inprogress) {
1959 rtlphy->set_io_inprogress = true;
1960 rtlphy->current_io_type = iotype;
1961 } else {
1962 return false;
1963 }
1964 rtl92c_phy_set_io(hw);
1965 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
1966 return true;
1967}
1968EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
1969
/*
 * Apply the IO command latched by rtl92c_phy_set_io_cmd().
 *
 * PAUSE_DM_BY_SCAN saves the current initial-gain value and forces a
 * fixed IG of 0x17 for the scan; RESUME_DM_BY_SCAN restores the saved
 * gain and reprograms TX power for the current channel.  Clears
 * set_io_inprogress when done.
 */
void rtl92c_phy_set_io(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
		 ("--->Cmd(%#x), set_io_inprogress(%d)\n",
		  rtlphy->current_io_type, rtlphy->set_io_inprogress));
	switch (rtlphy->current_io_type) {
	case IO_CMD_RESUME_DM_BY_SCAN:
		/* restore the gain saved when the scan started */
		dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
		rtl92c_dm_write_dig(hw);
		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
		break;
	case IO_CMD_PAUSE_DM_BY_SCAN:
		rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
		dm_digtable.cur_igvalue = 0x17;	/* fixed gain during scan */
		rtl92c_dm_write_dig(hw);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("switch case not process\n"));
		break;
	}
	rtlphy->set_io_inprogress = false;
	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
		 ("<---(%#x)\n", rtlphy->current_io_type));
}
1998EXPORT_SYMBOL(rtl92c_phy_set_io);
1999
/*
 * Power the RF section back on: restore SPS0 control, pulse the
 * SYS_FUNC_EN register (0xE3 -> 0xE2 -> 0xE3) around clearing APSD
 * control, then unpause TX.  This mirrors/undoes the sleep sequence in
 * _rtl92c_phy_set_rf_sleep().
 */
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
2011EXPORT_SYMBOL(rtl92ce_phy_set_rf_on);
2012
/*
 * Put the RF into sleep: pause TX, clear RF register 0x00 and request
 * APSD sleep, then poll (up to 5 retries of the whole sequence) until
 * the RF register reads back 0.  On timeout the chip is brought back to
 * an awake state and a trace message is emitted instead of sleeping.
 */
void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
{
	u32 u4b_tmp;
	u8 delay = 5;	/* retry budget, not a time value */
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
	rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
	u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
	while (u4b_tmp != 0 && delay > 0) {
		/* retry: drop APSD, clear the RF reg, request sleep again */
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
		u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
		delay--;
	}
	if (delay == 0) {
		/* timeout: restore an awake configuration instead */
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 ("Switch RF timeout !!!.\n"));
		return;
	}
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
}
2042EXPORT_SYMBOL(_rtl92c_phy_set_rf_sleep);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
new file mode 100644
index 000000000000..53ffb0981586
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -0,0 +1,246 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_PHY_H__
31#define __RTL92C_PHY_H__
32
33#define MAX_PRECMD_CNT 16
34#define MAX_RFDEPENDCMD_CNT 16
35#define MAX_POSTCMD_CNT 16
36
37#define MAX_DOZE_WAITING_TIMES_9x 64
38
39#define RT_CANNOT_IO(hw) false
40#define HIGHPOWER_RADIOA_ARRAYLEN 22
41
42#define MAX_TOLERANCE 5
43#define IQK_DELAY_TIME 1
44
45#define APK_BB_REG_NUM 5
46#define APK_AFE_REG_NUM 16
47#define APK_CURVE_REG_NUM 4
48#define PATH_NUM 2
49
50#define LOOP_LIMIT 5
51#define MAX_STALL_TIME 50
52#define AntennaDiversityValue 0x80
53#define MAX_TXPWR_IDX_NMODE_92S 63
54#define Reset_Cnt_Limit 3
55
56#define IQK_ADDA_REG_NUM 16
57#define IQK_MAC_REG_NUM 4
58
59#define RF90_PATH_MAX 2
60
61#define CT_OFFSET_MAC_ADDR 0X16
62
63#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
64#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
65#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF 0x66
66#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
67#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
68
69#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
70#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
71
72#define CT_OFFSET_CHANNEL_PLAH 0x75
73#define CT_OFFSET_THERMAL_METER 0x78
74#define CT_OFFSET_RF_OPTION 0x79
75#define CT_OFFSET_VERSION 0x7E
76#define CT_OFFSET_CUSTOMER_ID 0x7F
77
78#define RTL92C_MAX_PATH_NUM 2
79#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
80enum swchnlcmd_id {
81 CMDID_END,
82 CMDID_SET_TXPOWEROWER_LEVEL,
83 CMDID_BBREGWRITE10,
84 CMDID_WRITEPORT_ULONG,
85 CMDID_WRITEPORT_USHORT,
86 CMDID_WRITEPORT_UCHAR,
87 CMDID_RF_WRITEREG,
88};
89
90struct swchnlcmd {
91 enum swchnlcmd_id cmdid;
92 u32 para1;
93 u32 para2;
94 u32 msdelay;
95};
96
97enum hw90_block_e {
98 HW90_BLOCK_MAC = 0,
99 HW90_BLOCK_PHY0 = 1,
100 HW90_BLOCK_PHY1 = 2,
101 HW90_BLOCK_RF = 3,
102 HW90_BLOCK_MAXIMUM = 4,
103};
104
105enum baseband_config_type {
106 BASEBAND_CONFIG_PHY_REG = 0,
107 BASEBAND_CONFIG_AGC_TAB = 1,
108};
109
110enum ra_offset_area {
111 RA_OFFSET_LEGACY_OFDM1,
112 RA_OFFSET_LEGACY_OFDM2,
113 RA_OFFSET_HT_OFDM1,
114 RA_OFFSET_HT_OFDM2,
115 RA_OFFSET_HT_OFDM3,
116 RA_OFFSET_HT_OFDM4,
117 RA_OFFSET_HT_CCK,
118};
119
120enum antenna_path {
121 ANTENNA_NONE,
122 ANTENNA_D,
123 ANTENNA_C,
124 ANTENNA_CD,
125 ANTENNA_B,
126 ANTENNA_BD,
127 ANTENNA_BC,
128 ANTENNA_BCD,
129 ANTENNA_A,
130 ANTENNA_AD,
131 ANTENNA_AC,
132 ANTENNA_ACD,
133 ANTENNA_AB,
134 ANTENNA_ABD,
135 ANTENNA_ABC,
136 ANTENNA_ABCD
137};
138
139struct r_antenna_select_ofdm {
140 u32 r_tx_antenna:4;
141 u32 r_ant_l:4;
142 u32 r_ant_non_ht:4;
143 u32 r_ant_ht1:4;
144 u32 r_ant_ht2:4;
145 u32 r_ant_ht_s1:4;
146 u32 r_ant_non_ht_s1:4;
147 u32 ofdm_txsc:2;
148 u32 reserved:2;
149};
150
151struct r_antenna_select_cck {
152 u8 r_cckrx_enable_2:2;
153 u8 r_cckrx_enable:2;
154 u8 r_ccktx_enable:4;
155};
156
157struct efuse_contents {
158 u8 mac_addr[ETH_ALEN];
159 u8 cck_tx_power_idx[6];
160 u8 ht40_1s_tx_power_idx[6];
161 u8 ht40_2s_tx_power_idx_diff[3];
162 u8 ht20_tx_power_idx_diff[3];
163 u8 ofdm_tx_power_idx_diff[3];
164 u8 ht40_max_power_offset[3];
165 u8 ht20_max_power_offset[3];
166 u8 channel_plan;
167 u8 thermal_meter;
168 u8 rf_option[5];
169 u8 version;
170 u8 oem_id;
171 u8 regulatory;
172};
173
174struct tx_power_struct {
175 u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
176 u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
177 u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
178 u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
179 u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
180 u8 legacy_ht_txpowerdiff;
181 u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
182 u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
183 u8 pwrgroup_cnt;
184 u32 mcs_original_offset[4][16];
185};
186
187extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
188 u32 regaddr, u32 bitmask);
189extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
190 u32 regaddr, u32 bitmask, u32 data);
191extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
192 enum radio_path rfpath, u32 regaddr,
193 u32 bitmask);
194extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
195 enum radio_path rfpath, u32 regaddr,
196 u32 bitmask, u32 data);
197extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
198extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
199extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
200extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
201 enum radio_path rfpath);
202extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
203extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
204 long *powerlevel);
205extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
206extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
207 long power_indbm);
208extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
209 u8 operation);
210extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
211extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
212 enum nl80211_channel_type ch_type);
213extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
214extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
215extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
216extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
217 u16 beaconinterval);
218void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
219void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
220void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
221bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
222 enum radio_path rfpath);
223extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
224 u32 rfpath);
225extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
226 enum rf_pwrstate rfpwr_state);
227void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
228void rtl92c_phy_set_io(struct ieee80211_hw *hw);
229void rtl92c_bb_block_on(struct ieee80211_hw *hw);
230u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
231long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
232 enum wireless_mode wirelessmode,
233 u8 txpwridx);
234u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
235 enum wireless_mode wirelessmode,
236 long power_indbm);
237void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
238static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
239 u32 cmdtableidx, u32 cmdtablesz,
240 enum swchnlcmd_id cmdid, u32 para1,
241 u32 para2, u32 msdelay);
242static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
243 u8 channel, u8 *stage, u8 *step,
244 u32 *delay);
245
246#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
index 0f0be7c763b8..c0cb0cfe7d37 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
@@ -1,6 +1,5 @@
1rtl8192ce-objs := \ 1rtl8192ce-objs := \
2 dm.o \ 2 dm.o \
3 fw.o \
4 hw.o \ 3 hw.o \
5 led.o \ 4 led.o \
6 phy.o \ 5 phy.o \
@@ -10,3 +9,5 @@ rtl8192ce-objs := \
10 trx.o 9 trx.o
11 10
12obj-$(CONFIG_RTL8192CE) += rtl8192ce.o 11obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
12
13ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 83cd64895292..2f577c8828fc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -121,11 +121,37 @@
121#define CHIP_92C 0x01 121#define CHIP_92C 0x01
122#define CHIP_88C 0x00 122#define CHIP_88C 0x00
123 123
124/* Add vendor information into chip version definition.
125 * Add UMC B-Cut and RTL8723 chip info definition.
126 *
127 * BIT 7 Reserved
128 * BIT 6 UMC BCut
129 * BIT 5 Manufacturer(TSMC/UMC)
130 * BIT 4 TEST/NORMAL
131 * BIT 3 8723 Version
132 * BIT 2 8723?
133 * BIT 1 1T2R?
134 * BIT 0 88C/92C
135*/
136
124enum version_8192c { 137enum version_8192c {
125 VERSION_A_CHIP_92C = 0x01, 138 VERSION_A_CHIP_92C = 0x01,
126 VERSION_A_CHIP_88C = 0x00, 139 VERSION_A_CHIP_88C = 0x00,
127 VERSION_B_CHIP_92C = 0x11, 140 VERSION_B_CHIP_92C = 0x11,
128 VERSION_B_CHIP_88C = 0x10, 141 VERSION_B_CHIP_88C = 0x10,
142 VERSION_TEST_CHIP_88C = 0x00,
143 VERSION_TEST_CHIP_92C = 0x01,
144 VERSION_NORMAL_TSMC_CHIP_88C = 0x10,
145 VERSION_NORMAL_TSMC_CHIP_92C = 0x11,
146 VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13,
147 VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30,
148 VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31,
149 VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33,
150 VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34,
151 VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c,
152 VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70,
153 VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71,
154 VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73,
129 VERSION_UNKNOWN = 0x88, 155 VERSION_UNKNOWN = 0x88,
130}; 156};
131 157
@@ -254,4 +280,122 @@ struct h2c_cmd_8192c {
254 u8 *p_cmdbuffer; 280 u8 *p_cmdbuffer;
255}; 281};
256 282
283static inline u8 _rtl92c_get_chnl_group(u8 chnl)
284{
285 u8 group = 0;
286
287 if (chnl < 3)
288 group = 0;
289 else if (chnl < 9)
290 group = 1;
291 else
292 group = 2;
293
294 return group;
295}
296
297/* NOTE: reference to rtl8192c_rates struct */
298static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT,
299 u8 desc_rate, bool first_ampdu)
300{
301 struct rtl_priv *rtlpriv = rtl_priv(hw);
302 int rate_idx = 0;
303
304 if (first_ampdu) {
305 if (false == isHT) {
306 switch (desc_rate) {
307 case DESC92C_RATE1M:
308 rate_idx = 0;
309 break;
310 case DESC92C_RATE2M:
311 rate_idx = 1;
312 break;
313 case DESC92C_RATE5_5M:
314 rate_idx = 2;
315 break;
316 case DESC92C_RATE11M:
317 rate_idx = 3;
318 break;
319 case DESC92C_RATE6M:
320 rate_idx = 4;
321 break;
322 case DESC92C_RATE9M:
323 rate_idx = 5;
324 break;
325 case DESC92C_RATE12M:
326 rate_idx = 6;
327 break;
328 case DESC92C_RATE18M:
329 rate_idx = 7;
330 break;
331 case DESC92C_RATE24M:
332 rate_idx = 8;
333 break;
334 case DESC92C_RATE36M:
335 rate_idx = 9;
336 break;
337 case DESC92C_RATE48M:
338 rate_idx = 10;
339 break;
340 case DESC92C_RATE54M:
341 rate_idx = 11;
342 break;
343 default:
344 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
345 ("Rate %d is not support, set to "
346 "1M rate.\n", desc_rate));
347 rate_idx = 0;
348 break;
349 }
350 } else {
351 rate_idx = 11;
352 }
353 return rate_idx;
354 }
355 switch (desc_rate) {
356 case DESC92C_RATE1M:
357 rate_idx = 0;
358 break;
359 case DESC92C_RATE2M:
360 rate_idx = 1;
361 break;
362 case DESC92C_RATE5_5M:
363 rate_idx = 2;
364 break;
365 case DESC92C_RATE11M:
366 rate_idx = 3;
367 break;
368 case DESC92C_RATE6M:
369 rate_idx = 4;
370 break;
371 case DESC92C_RATE9M:
372 rate_idx = 5;
373 break;
374 case DESC92C_RATE12M:
375 rate_idx = 6;
376 break;
377 case DESC92C_RATE18M:
378 rate_idx = 7;
379 break;
380 case DESC92C_RATE24M:
381 rate_idx = 8;
382 break;
383 case DESC92C_RATE36M:
384 rate_idx = 9;
385 break;
386 case DESC92C_RATE48M:
387 rate_idx = 10;
388 break;
389 case DESC92C_RATE54M:
390 rate_idx = 11;
391 break;
392 /* TODO: How to mapping MCS rate? */
393 /* NOTE: referenc to __ieee80211_rx */
394 default:
395 rate_idx = 11;
396 break;
397 }
398 return rate_idx;
399}
400
257#endif 401#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 62e7c64e087b..7d76504df4d1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -33,487 +33,15 @@
33#include "def.h" 33#include "def.h"
34#include "phy.h" 34#include "phy.h"
35#include "dm.h" 35#include "dm.h"
36#include "fw.h"
37 36
38struct dig_t dm_digtable; 37void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
39static struct ps_t dm_pstable;
40
41static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
42 0x7f8001fe,
43 0x788001e2,
44 0x71c001c7,
45 0x6b8001ae,
46 0x65400195,
47 0x5fc0017f,
48 0x5a400169,
49 0x55400155,
50 0x50800142,
51 0x4c000130,
52 0x47c0011f,
53 0x43c0010f,
54 0x40000100,
55 0x3c8000f2,
56 0x390000e4,
57 0x35c000d7,
58 0x32c000cb,
59 0x300000c0,
60 0x2d4000b5,
61 0x2ac000ab,
62 0x288000a2,
63 0x26000098,
64 0x24000090,
65 0x22000088,
66 0x20000080,
67 0x1e400079,
68 0x1c800072,
69 0x1b00006c,
70 0x19800066,
71 0x18000060,
72 0x16c0005b,
73 0x15800056,
74 0x14400051,
75 0x1300004c,
76 0x12000048,
77 0x11000044,
78 0x10000040,
79};
80
81static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
82 {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
83 {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
84 {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
85 {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
86 {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
87 {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
88 {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
89 {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
90 {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
91 {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
92 {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
93 {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
94 {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
95 {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
96 {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
97 {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
98 {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
99 {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
100 {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
101 {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
102 {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
103 {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
104 {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
105 {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
106 {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
107 {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
108 {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
109 {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
110 {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
111 {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
112 {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
113 {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
114 {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
115};
116
117static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
118 {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
119 {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
120 {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
121 {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
122 {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
123 {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
124 {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
125 {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
126 {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
127 {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
128 {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
129 {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
130 {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
131 {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
132 {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
133 {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
134 {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
135 {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
136 {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
137 {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
138 {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
139 {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
140 {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
141 {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
142 {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
143 {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
144 {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
145 {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
146 {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
147 {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
148 {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
149 {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
150 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
151};
152
153static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
154{
155 dm_digtable.dig_enable_flag = true;
156 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
157 dm_digtable.cur_igvalue = 0x20;
158 dm_digtable.pre_igvalue = 0x0;
159 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
160 dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
161 dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
162 dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
163 dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
164 dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
165 dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
166 dm_digtable.rx_gain_range_max = DM_DIG_MAX;
167 dm_digtable.rx_gain_range_min = DM_DIG_MIN;
168 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
169 dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
170 dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
171 dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
172 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
173}
174
175static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
176{
177 struct rtl_priv *rtlpriv = rtl_priv(hw);
178 long rssi_val_min = 0;
179
180 if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
181 (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
182 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
183 rssi_val_min =
184 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
185 rtlpriv->dm.undecorated_smoothed_pwdb) ?
186 rtlpriv->dm.undecorated_smoothed_pwdb :
187 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
188 else
189 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
190 } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
191 dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
192 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
193 } else if (dm_digtable.curmultista_connectstate ==
194 DIG_MULTISTA_CONNECT) {
195 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
196 }
197
198 return (u8) rssi_val_min;
199}
200
201static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
202{
203 u32 ret_value;
204 struct rtl_priv *rtlpriv = rtl_priv(hw);
205 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
206
207 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
208 falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
209
210 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
211 falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
212 falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
213
214 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
215 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
216 falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
217 falsealm_cnt->cnt_rate_illegal +
218 falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
219
220 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
221 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
222 falsealm_cnt->cnt_cck_fail = ret_value;
223
224 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
225 falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
226 falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
227 falsealm_cnt->cnt_rate_illegal +
228 falsealm_cnt->cnt_crc8_fail +
229 falsealm_cnt->cnt_mcs_fail +
230 falsealm_cnt->cnt_cck_fail);
231
232 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
233 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
234 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
235 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
236
237 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
238 ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
239 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
240 falsealm_cnt->cnt_parity_fail,
241 falsealm_cnt->cnt_rate_illegal,
242 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
243
244 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
245 ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
246 falsealm_cnt->cnt_ofdm_fail,
247 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
248}
249
250static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
251{
252 struct rtl_priv *rtlpriv = rtl_priv(hw);
253 u8 value_igi = dm_digtable.cur_igvalue;
254
255 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
256 value_igi--;
257 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
258 value_igi += 0;
259 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
260 value_igi++;
261 else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
262 value_igi += 2;
263 if (value_igi > DM_DIG_FA_UPPER)
264 value_igi = DM_DIG_FA_UPPER;
265 else if (value_igi < DM_DIG_FA_LOWER)
266 value_igi = DM_DIG_FA_LOWER;
267 if (rtlpriv->falsealm_cnt.cnt_all > 10000)
268 value_igi = 0x32;
269
270 dm_digtable.cur_igvalue = value_igi;
271 rtl92c_dm_write_dig(hw);
272}
273
274static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
275{
276 struct rtl_priv *rtlpriv = rtl_priv(hw);
277
278 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
279 if ((dm_digtable.backoff_val - 2) <
280 dm_digtable.backoff_val_range_min)
281 dm_digtable.backoff_val =
282 dm_digtable.backoff_val_range_min;
283 else
284 dm_digtable.backoff_val -= 2;
285 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
286 if ((dm_digtable.backoff_val + 2) >
287 dm_digtable.backoff_val_range_max)
288 dm_digtable.backoff_val =
289 dm_digtable.backoff_val_range_max;
290 else
291 dm_digtable.backoff_val += 2;
292 }
293
294 if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
295 dm_digtable.rx_gain_range_max)
296 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
297 else if ((dm_digtable.rssi_val_min + 10 -
298 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
299 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
300 else
301 dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
302 dm_digtable.backoff_val;
303
304 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
305 ("rssi_val_min = %x backoff_val %x\n",
306 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
307
308 rtl92c_dm_write_dig(hw);
309}
310
311static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
312{
313 static u8 binitialized; /* initialized to false */
314 struct rtl_priv *rtlpriv = rtl_priv(hw);
315 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
316 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
317 bool b_multi_sta = false;
318
319 if (mac->opmode == NL80211_IFTYPE_ADHOC)
320 b_multi_sta = true;
321
322 if ((b_multi_sta == false) || (dm_digtable.cursta_connectctate !=
323 DIG_STA_DISCONNECT)) {
324 binitialized = false;
325 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
326 return;
327 } else if (binitialized == false) {
328 binitialized = true;
329 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
330 dm_digtable.cur_igvalue = 0x20;
331 rtl92c_dm_write_dig(hw);
332 }
333
334 if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
335 if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
336 (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
337
338 if (dm_digtable.dig_ext_port_stage ==
339 DIG_EXT_PORT_STAGE_2) {
340 dm_digtable.cur_igvalue = 0x20;
341 rtl92c_dm_write_dig(hw);
342 }
343
344 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
345 } else if (rssi_strength > dm_digtable.rssi_highthresh) {
346 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
347 rtl92c_dm_ctrl_initgain_by_fa(hw);
348 }
349 } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
350 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
351 dm_digtable.cur_igvalue = 0x20;
352 rtl92c_dm_write_dig(hw);
353 }
354
355 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
356 ("curmultista_connectstate = "
357 "%x dig_ext_port_stage %x\n",
358 dm_digtable.curmultista_connectstate,
359 dm_digtable.dig_ext_port_stage));
360}
361
362static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
363{
364 struct rtl_priv *rtlpriv = rtl_priv(hw);
365
366 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
367 ("presta_connectstate = %x,"
368 " cursta_connectctate = %x\n",
369 dm_digtable.presta_connectstate,
370 dm_digtable.cursta_connectctate));
371
372 if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
373 || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
374 || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
375
376 if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
377 dm_digtable.rssi_val_min =
378 rtl92c_dm_initial_gain_min_pwdb(hw);
379 rtl92c_dm_ctrl_initgain_by_rssi(hw);
380 }
381 } else {
382 dm_digtable.rssi_val_min = 0;
383 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
384 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
385 dm_digtable.cur_igvalue = 0x20;
386 dm_digtable.pre_igvalue = 0;
387 rtl92c_dm_write_dig(hw);
388 }
389}
390
391static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
392{
393 struct rtl_priv *rtlpriv = rtl_priv(hw);
394 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
395
396 if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
397 dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
398
399 if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
400 if (dm_digtable.rssi_val_min <= 25)
401 dm_digtable.cur_cck_pd_state =
402 CCK_PD_STAGE_LowRssi;
403 else
404 dm_digtable.cur_cck_pd_state =
405 CCK_PD_STAGE_HighRssi;
406 } else {
407 if (dm_digtable.rssi_val_min <= 20)
408 dm_digtable.cur_cck_pd_state =
409 CCK_PD_STAGE_LowRssi;
410 else
411 dm_digtable.cur_cck_pd_state =
412 CCK_PD_STAGE_HighRssi;
413 }
414 } else {
415 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
416 }
417
418 if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
419 if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
420 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
421 dm_digtable.cur_cck_fa_state =
422 CCK_FA_STAGE_High;
423 else
424 dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
425
426 if (dm_digtable.pre_cck_fa_state !=
427 dm_digtable.cur_cck_fa_state) {
428 if (dm_digtable.cur_cck_fa_state ==
429 CCK_FA_STAGE_Low)
430 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
431 0x83);
432 else
433 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
434 0xcd);
435
436 dm_digtable.pre_cck_fa_state =
437 dm_digtable.cur_cck_fa_state;
438 }
439
440 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
441
442 if (IS_92C_SERIAL(rtlhal->version))
443 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
444 MASKBYTE2, 0xd7);
445 } else {
446 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
447 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
448
449 if (IS_92C_SERIAL(rtlhal->version))
450 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
451 MASKBYTE2, 0xd3);
452 }
453 dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
454 }
455
456 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
457 ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
458
459 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
460 ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
461}
462
463static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
464{
465 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
466
467 if (mac->act_scanning == true)
468 return;
469
470 if ((mac->link_state > MAC80211_NOLINK) &&
471 (mac->link_state < MAC80211_LINKED))
472 dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
473 else if (mac->link_state >= MAC80211_LINKED)
474 dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
475 else
476 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
477
478 rtl92c_dm_initial_gain_sta(hw);
479 rtl92c_dm_initial_gain_multi_sta(hw);
480 rtl92c_dm_cck_packet_detection_thresh(hw);
481
482 dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
483
484}
485
486static void rtl92c_dm_dig(struct ieee80211_hw *hw)
487{
488 struct rtl_priv *rtlpriv = rtl_priv(hw);
489
490 if (rtlpriv->dm.b_dm_initialgain_enable == false)
491 return;
492 if (dm_digtable.dig_enable_flag == false)
493 return;
494
495 rtl92c_dm_ctrl_initgain_by_twoport(hw);
496
497}
498
499static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
500{
501 struct rtl_priv *rtlpriv = rtl_priv(hw);
502
503 rtlpriv->dm.bdynamic_txpower_enable = false;
504
505 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
506 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
507}
508
509static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
510{ 38{
511 struct rtl_priv *rtlpriv = rtl_priv(hw); 39 struct rtl_priv *rtlpriv = rtl_priv(hw);
512 struct rtl_phy *rtlphy = &(rtlpriv->phy); 40 struct rtl_phy *rtlphy = &(rtlpriv->phy);
513 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 41 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
514 long undecorated_smoothed_pwdb; 42 long undecorated_smoothed_pwdb;
515 43
516 if (!rtlpriv->dm.bdynamic_txpower_enable) 44 if (!rtlpriv->dm.dynamic_txpower_enable)
517 return; 45 return;
518 46
519 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) { 47 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
@@ -583,891 +111,3 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
583 111
584 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; 112 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
585} 113}
586
587void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
588{
589 struct rtl_priv *rtlpriv = rtl_priv(hw);
590
591 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
592 ("cur_igvalue = 0x%x, "
593 "pre_igvalue = 0x%x, backoff_val = %d\n",
594 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
595 dm_digtable.backoff_val));
596
597 if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
598 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
599 dm_digtable.cur_igvalue);
600 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
601 dm_digtable.cur_igvalue);
602
603 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
604 }
605}
606
/*
 * PWDB (signal strength) monitor skeleton.
 *
 * NOTE(review): the unconditional "return" below makes everything after it
 * dead code — the min/max bookkeeping and the H2C_RSSI_REPORT firmware
 * command are never executed.  Presumably the per-entry PWDB collection
 * that would populate tmpentry_{max,min}_pwdb is not implemented yet;
 * confirm before removing either the return or the dead code.
 */
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;

	u8 h2c_parameter[3] = { 0 };

	/* Stubbed out: nothing below ever runs (see NOTE above). */
	return;

	if (tmpentry_max_pwdb != 0) {
		rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
		    tmpentry_max_pwdb;
	} else {
		rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
	}

	if (tmpentry_min_pwdb != 0xff) {
		rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
		    tmpentry_min_pwdb;
	} else {
		rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
	}

	/* h2c_parameter[1] is left at 0 from the initializer. */
	h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
	h2c_parameter[0] = 0;

	rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
}
635
636void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
637{
638 struct rtl_priv *rtlpriv = rtl_priv(hw);
639 rtlpriv->dm.bcurrent_turbo_edca = false;
640 rtlpriv->dm.bis_any_nonbepkts = false;
641 rtlpriv->dm.bis_cur_rdlstate = false;
642}
643
/*
 * EDCA turbo: when only best-effort traffic is flowing, program a more
 * aggressive BE EDCA parameter set tuned for the dominant direction
 * (download vs upload).  Called periodically from the watchdog.
 *
 * Uses function-local static byte counters to compute per-interval
 * deltas, so this is implicitly single-instance state.
 */
static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	static u64 last_txok_cnt;
	static u64 last_rxok_cnt;
	u64 cur_txok_cnt;
	u64 cur_rxok_cnt;
	u32 edca_be_ul = 0x5ea42b;
	u32 edca_be_dl = 0x5ea42b;

	/* No EDCA turbo in IBSS, but still refresh the counters below. */
	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		goto dm_checkedcaturbo_exit;

	/* NOTE(review): this early return skips the counter refresh at the
	 * exit label, unlike the ad-hoc path above — confirm intentional. */
	if (mac->link_state != MAC80211_LINKED) {
		rtlpriv->dm.bcurrent_turbo_edca = false;
		return;
	}

	if (!mac->ht_enable) {	/*FIX MERGE */
		if (!(edca_be_ul & 0xffff0000))
			edca_be_ul |= 0x005e0000;

		if (!(edca_be_dl & 0xffff0000))
			edca_be_dl |= 0x005e0000;
	}

	if ((!rtlpriv->dm.bis_any_nonbepkts) &&
	    (!rtlpriv->dm.b_disable_framebursting)) {
		/* Bytes moved since the last watchdog pass. */
		cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
		cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
		if (cur_rxok_cnt > 4 * cur_txok_cnt) {
			/* Download-dominated: use the DL parameter set. */
			if (!rtlpriv->dm.bis_cur_rdlstate ||
			    !rtlpriv->dm.bcurrent_turbo_edca) {
				rtl_write_dword(rtlpriv,
						REG_EDCA_BE_PARAM,
						edca_be_dl);
				rtlpriv->dm.bis_cur_rdlstate = true;
			}
		} else {
			/* Upload-dominated (or balanced): UL parameters. */
			if (rtlpriv->dm.bis_cur_rdlstate ||
			    !rtlpriv->dm.bcurrent_turbo_edca) {
				rtl_write_dword(rtlpriv,
						REG_EDCA_BE_PARAM,
						edca_be_ul);
				rtlpriv->dm.bis_cur_rdlstate = false;
			}
		}
		rtlpriv->dm.bcurrent_turbo_edca = true;
	} else {
		/* Mixed-AC traffic: fall back to the standard BE EDCA
		 * parameters via the normal hw-reg path. */
		if (rtlpriv->dm.bcurrent_turbo_edca) {
			u8 tmp = AC0_BE;
			rtlpriv->cfg->ops->set_hw_reg(hw,
						      HW_VAR_AC_PARAM,
						      (u8 *) (&tmp));
			rtlpriv->dm.bcurrent_turbo_edca = false;
		}
	}

dm_checkedcaturbo_exit:
	rtlpriv->dm.bis_any_nonbepkts = false;
	last_txok_cnt = rtlpriv->stats.txbytesunicast;
	last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
}
708
/*
 * Thermal-meter based TX power tracking.
 *
 * Reads the RF thermal meter, compares it against the eeprom-calibrated
 * value and against the readings at the last LCK/IQK, and compensates the
 * OFDM (path A, and path B on 2T parts) and CCK TX swing tables for
 * temperature drift.  Also re-triggers LC calibration when the thermal
 * delta since the last LCK exceeds 1, and IQ calibration when the delta
 * since the last IQK exceeds 3.
 *
 * Register write ordering here follows the vendor calibration sequence;
 * do not reorder.
 */
static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
							     *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 thermalvalue, delta, delta_lck, delta_iqk;
	long ele_a, ele_d, temp_cck, val_x, value32;
	long val_y, ele_c;
	/* NOTE(review): ofdm_index_old/cck_index_old are only assigned when
	 * a swing-table entry matches the current register value; if no
	 * entry matches they are read uninitialized below — confirm the
	 * tables always cover the register contents. */
	u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
	int i;
	bool is2t = IS_92C_SERIAL(rtlhal->version);
	u8 txpwr_level[2] = {0, 0};
	u8 ofdm_min_index = 6, rf;

	rtlpriv->dm.btxpower_trackingInit = true;
	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));

	thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
		  "eeprom_thermalmeter 0x%x\n",
		  thermalvalue, rtlpriv->dm.thermalvalue,
		  rtlefuse->eeprom_thermalmeter));

	rtl92c_phy_ap_calibrate(hw, (thermalvalue -
				     rtlefuse->eeprom_thermalmeter));
	/* Number of RF paths to compensate: 2 for 92C-series, else 1. */
	if (is2t)
		rf = 2;
	else
		rf = 1;

	/* A zero reading means the meter is not ready; skip everything. */
	if (thermalvalue) {
		/* Recover the current path-A OFDM swing index by matching
		 * the ele_d field against the swing table. */
		ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
				      MASKDWORD) & MASKOFDM_D;

		for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
			if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
				ofdm_index_old[0] = (u8) i;

				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 ("Initial pathA ele_d reg0x%x = 0x%lx, "
					  "ofdm_index=0x%x\n",
					  ROFDM0_XATXIQIMBALANCE,
					  ele_d, ofdm_index_old[0]));
				break;
			}
		}

		/* Same recovery for path B on 2T parts. */
		if (is2t) {
			ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
					      MASKDWORD) & MASKOFDM_D;

			for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
				if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
					ofdm_index_old[1] = (u8) i;

					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 ("Initial pathB ele_d reg0x%x = "
						  "0x%lx, ofdm_index=0x%x\n",
						  ROFDM0_XBTXIQIMBALANCE, ele_d,
						  ofdm_index_old[1]));
					break;
				}
			}
		}

		/* Recover the current CCK swing index the same way, using
		 * the channel-14 table when operating on channel 14. */
		temp_cck =
		    rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;

		for (i = 0; i < CCK_TABLE_LENGTH; i++) {
			if (rtlpriv->dm.b_cck_inch14) {
				if (memcmp((void *)&temp_cck,
					   (void *)&cckswing_table_ch14[i][2],
					   4) == 0) {
					cck_index_old = (u8) i;

					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 ("Initial reg0x%x = 0x%lx, "
						  "cck_index=0x%x, ch 14 %d\n",
						  RCCK0_TXFILTER2, temp_cck,
						  cck_index_old,
						  rtlpriv->dm.b_cck_inch14));
					break;
				}
			} else {
				if (memcmp((void *)&temp_cck,
					   (void *)
					   &cckswing_table_ch1ch13[i][2],
					   4) == 0) {
					cck_index_old = (u8) i;

					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 ("Initial reg0x%x = 0x%lx, "
						  "cck_index=0x%x, ch14 %d\n",
						  RCCK0_TXFILTER2, temp_cck,
						  cck_index_old,
						  rtlpriv->dm.b_cck_inch14));
					break;
				}
			}
		}

		/* First pass ever: seed the tracking baselines. */
		if (!rtlpriv->dm.thermalvalue) {
			rtlpriv->dm.thermalvalue =
			    rtlefuse->eeprom_thermalmeter;
			rtlpriv->dm.thermalvalue_lck = thermalvalue;
			rtlpriv->dm.thermalvalue_iqk = thermalvalue;
			for (i = 0; i < rf; i++)
				rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
			rtlpriv->dm.cck_index = cck_index_old;
		}

		/* Absolute thermal deltas vs baseline / last LCK / last IQK. */
		delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue) :
		    (rtlpriv->dm.thermalvalue - thermalvalue);

		delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
		    (rtlpriv->dm.thermalvalue_lck - thermalvalue);

		delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
		    (rtlpriv->dm.thermalvalue_iqk - thermalvalue);

		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
			  "eeprom_thermalmeter 0x%x delta 0x%x "
			  "delta_lck 0x%x delta_iqk 0x%x\n",
			  thermalvalue, rtlpriv->dm.thermalvalue,
			  rtlefuse->eeprom_thermalmeter, delta, delta_lck,
			  delta_iqk));

		/* Re-run LC calibration after a >1 step thermal drift. */
		if (delta_lck > 1) {
			rtlpriv->dm.thermalvalue_lck = thermalvalue;
			rtl92c_phy_lc_calibrate(hw);
		}

		if (delta > 0 && rtlpriv->dm.txpower_track_control) {
			/* Shift the swing indices against the drift
			 * direction (hotter -> lower index). */
			if (thermalvalue > rtlpriv->dm.thermalvalue) {
				for (i = 0; i < rf; i++)
					rtlpriv->dm.ofdm_index[i] -= delta;
				rtlpriv->dm.cck_index -= delta;
			} else {
				for (i = 0; i < rf; i++)
					rtlpriv->dm.ofdm_index[i] += delta;
				rtlpriv->dm.cck_index += delta;
			}

			if (is2t) {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 ("temp OFDM_A_index=0x%x, "
					  "OFDM_B_index=0x%x,"
					  "cck_index=0x%x\n",
					  rtlpriv->dm.ofdm_index[0],
					  rtlpriv->dm.ofdm_index[1],
					  rtlpriv->dm.cck_index));
			} else {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 ("temp OFDM_A_index=0x%x,"
					  "cck_index=0x%x\n",
					  rtlpriv->dm.ofdm_index[0],
					  rtlpriv->dm.cck_index));
			}

			if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
				for (i = 0; i < rf; i++)
					ofdm_index[i] =
					    rtlpriv->dm.ofdm_index[i]
					    + 1;
				cck_index = rtlpriv->dm.cck_index + 1;
			} else {
				for (i = 0; i < rf; i++)
					ofdm_index[i] =
					    rtlpriv->dm.ofdm_index[i];
				cck_index = rtlpriv->dm.cck_index;
			}

			/* Per-path extra adjustment keyed off txpwr_level
			 * (always {0,0} here, so only the first range ever
			 * matches).  NOTE(review): txpwr_level[i] >= 0 is
			 * always true for u8, and the >=32/<=32 ranges
			 * overlap — looks like vendor-code leftovers. */
			for (i = 0; i < rf; i++) {
				if (txpwr_level[i] >= 0 &&
				    txpwr_level[i] <= 26) {
					if (thermalvalue >
					    rtlefuse->eeprom_thermalmeter) {
						if (delta < 5)
							ofdm_index[i] -= 1;

						else
							ofdm_index[i] -= 2;
					} else if (delta > 5 && thermalvalue <
						   rtlefuse->
						   eeprom_thermalmeter) {
						ofdm_index[i] += 1;
					}
				} else if (txpwr_level[i] >= 27 &&
					   txpwr_level[i] <= 32
					   && thermalvalue >
					   rtlefuse->eeprom_thermalmeter) {
					if (delta < 5)
						ofdm_index[i] -= 1;

					else
						ofdm_index[i] -= 2;
				} else if (txpwr_level[i] >= 32 &&
					   txpwr_level[i] <= 38 &&
					   thermalvalue >
					   rtlefuse->eeprom_thermalmeter
					   && delta > 5) {
					ofdm_index[i] -= 1;
				}
			}

			/* NOTE(review): at this point i == rf, so
			 * txpwr_level[i] reads txpwr_level[2] (out of
			 * bounds) on 2T parts — this CCK block probably
			 * belongs inside the loop above or should use a
			 * fixed index.  Flagged, not changed. */
			if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
				if (thermalvalue >
				    rtlefuse->eeprom_thermalmeter) {
					if (delta < 5)
						cck_index -= 1;

					else
						cck_index -= 2;
				} else if (delta > 5 && thermalvalue <
					   rtlefuse->eeprom_thermalmeter) {
					cck_index += 1;
				}
			} else if (txpwr_level[i] >= 27 &&
				   txpwr_level[i] <= 32 &&
				   thermalvalue >
				   rtlefuse->eeprom_thermalmeter) {
				if (delta < 5)
					cck_index -= 1;

				else
					cck_index -= 2;
			} else if (txpwr_level[i] >= 32 &&
				   txpwr_level[i] <= 38 &&
				   thermalvalue > rtlefuse->eeprom_thermalmeter
				   && delta > 5) {
				cck_index -= 1;
			}

			/* Clamp the OFDM indices into the valid table range. */
			for (i = 0; i < rf; i++) {
				if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
					ofdm_index[i] = OFDM_TABLE_SIZE - 1;

				else if (ofdm_index[i] < ofdm_min_index)
					ofdm_index[i] = ofdm_min_index;
			}

			/* NOTE(review): cck_index is u8, so the < 0 check
			 * below can never fire (underflow wraps instead). */
			if (cck_index > CCK_TABLE_SIZE - 1)
				cck_index = CCK_TABLE_SIZE - 1;
			else if (cck_index < 0)
				cck_index = 0;

			if (is2t) {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 ("new OFDM_A_index=0x%x, "
					  "OFDM_B_index=0x%x,"
					  "cck_index=0x%x\n",
					  ofdm_index[0], ofdm_index[1],
					  cck_index));
			} else {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 ("new OFDM_A_index=0x%x,"
					  "cck_index=0x%x\n",
					  ofdm_index[0], cck_index));
			}
		}

		if (rtlpriv->dm.txpower_track_control && delta != 0) {
			/* Apply the path-A OFDM swing, folding in the IQK
			 * results (reg_e94/reg_e9c) when available. */
			ele_d =
			    (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
			val_x = rtlphy->reg_e94;
			val_y = rtlphy->reg_e9c;

			if (val_x != 0) {
				/* Sign-extend the 10-bit IQK values. */
				if ((val_x & 0x00000200) != 0)
					val_x = val_x | 0xFFFFFC00;
				ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;

				if ((val_y & 0x00000200) != 0)
					val_y = val_y | 0xFFFFFC00;
				ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;

				value32 = (ele_d << 22) |
				    ((ele_c & 0x3F) << 16) | ele_a;

				rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					      MASKDWORD, value32);

				value32 = (ele_c & 0x000003C0) >> 6;
				rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
					      value32);

				value32 = ((val_x * ele_d) >> 7) & 0x01;
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(31), value32);

				value32 = ((val_y * ele_d) >> 7) & 0x01;
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(29), value32);
			} else {
				/* No IQK result: program the raw table entry. */
				rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					      MASKDWORD,
					      ofdmswing_table[ofdm_index[0]]);

				rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
					      0x00);
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(31) | BIT(29), 0x00);
			}

			/* Program the 8 CCK TX filter bytes for the new
			 * swing index (channel-14 uses its own table). */
			if (!rtlpriv->dm.b_cck_inch14) {
				rtl_write_byte(rtlpriv, 0xa22,
					       cckswing_table_ch1ch13[cck_index]
					       [0]);
				rtl_write_byte(rtlpriv, 0xa23,
					       cckswing_table_ch1ch13[cck_index]
					       [1]);
				rtl_write_byte(rtlpriv, 0xa24,
					       cckswing_table_ch1ch13[cck_index]
					       [2]);
				rtl_write_byte(rtlpriv, 0xa25,
					       cckswing_table_ch1ch13[cck_index]
					       [3]);
				rtl_write_byte(rtlpriv, 0xa26,
					       cckswing_table_ch1ch13[cck_index]
					       [4]);
				rtl_write_byte(rtlpriv, 0xa27,
					       cckswing_table_ch1ch13[cck_index]
					       [5]);
				rtl_write_byte(rtlpriv, 0xa28,
					       cckswing_table_ch1ch13[cck_index]
					       [6]);
				rtl_write_byte(rtlpriv, 0xa29,
					       cckswing_table_ch1ch13[cck_index]
					       [7]);
			} else {
				rtl_write_byte(rtlpriv, 0xa22,
					       cckswing_table_ch14[cck_index]
					       [0]);
				rtl_write_byte(rtlpriv, 0xa23,
					       cckswing_table_ch14[cck_index]
					       [1]);
				rtl_write_byte(rtlpriv, 0xa24,
					       cckswing_table_ch14[cck_index]
					       [2]);
				rtl_write_byte(rtlpriv, 0xa25,
					       cckswing_table_ch14[cck_index]
					       [3]);
				rtl_write_byte(rtlpriv, 0xa26,
					       cckswing_table_ch14[cck_index]
					       [4]);
				rtl_write_byte(rtlpriv, 0xa27,
					       cckswing_table_ch14[cck_index]
					       [5]);
				rtl_write_byte(rtlpriv, 0xa28,
					       cckswing_table_ch14[cck_index]
					       [6]);
				rtl_write_byte(rtlpriv, 0xa29,
					       cckswing_table_ch14[cck_index]
					       [7]);
			}

			/* Same swing/IQK programming for path B on 2T parts
			 * (reg_eb4/reg_ebc, X-D AFE, ECCA bits 27/25). */
			if (is2t) {
				ele_d = (ofdmswing_table[ofdm_index[1]] &
					 0xFFC00000) >> 22;

				val_x = rtlphy->reg_eb4;
				val_y = rtlphy->reg_ebc;

				if (val_x != 0) {
					if ((val_x & 0x00000200) != 0)
						val_x = val_x | 0xFFFFFC00;
					ele_a = ((val_x * ele_d) >> 8) &
					    0x000003FF;

					if ((val_y & 0x00000200) != 0)
						val_y = val_y | 0xFFFFFC00;
					ele_c = ((val_y * ele_d) >> 8) &
					    0x00003FF;

					value32 = (ele_d << 22) |
					    ((ele_c & 0x3F) << 16) | ele_a;
					rtl_set_bbreg(hw,
						      ROFDM0_XBTXIQIMBALANCE,
						      MASKDWORD, value32);

					value32 = (ele_c & 0x000003C0) >> 6;
					rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
						      MASKH4BITS, value32);

					value32 = ((val_x * ele_d) >> 7) & 0x01;
					rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
						      BIT(27), value32);

					value32 = ((val_y * ele_d) >> 7) & 0x01;
					rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
						      BIT(25), value32);
				} else {
					rtl_set_bbreg(hw,
						      ROFDM0_XBTXIQIMBALANCE,
						      MASKDWORD,
						      ofdmswing_table[ofdm_index
								      [1]]);
					rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
						      MASKH4BITS, 0x00);
					rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
						      BIT(27) | BIT(25), 0x00);
				}

			}
		}

		/* Re-run IQ calibration after a >3 step thermal drift. */
		if (delta_iqk > 3) {
			rtlpriv->dm.thermalvalue_iqk = thermalvalue;
			rtl92c_phy_iq_calibrate(hw, false);
		}

		if (rtlpriv->dm.txpower_track_control)
			rtlpriv->dm.thermalvalue = thermalvalue;
	}

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));

}
1139
1140static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
1141 struct ieee80211_hw *hw)
1142{
1143 struct rtl_priv *rtlpriv = rtl_priv(hw);
1144
1145 rtlpriv->dm.btxpower_tracking = true;
1146 rtlpriv->dm.btxpower_trackingInit = false;
1147
1148 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1149 ("pMgntInfo->btxpower_tracking = %d\n",
1150 rtlpriv->dm.btxpower_tracking));
1151}
1152
/* TX power tracking init entry point; the thermal meter is the only
 * tracking method this chip supports. */
static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
	rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
}
1157
/* Run the tracking callback synchronously (no workqueue deferral). */
static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
{
	rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
}
1162
1163static void rtl92c_dm_check_txpower_tracking_thermal_meter(
1164 struct ieee80211_hw *hw)
1165{
1166 struct rtl_priv *rtlpriv = rtl_priv(hw);
1167 static u8 tm_trigger;
1168
1169 if (!rtlpriv->dm.btxpower_tracking)
1170 return;
1171
1172 if (!tm_trigger) {
1173 rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
1174 0x60);
1175 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1176 ("Trigger 92S Thermal Meter!!\n"));
1177 tm_trigger = 1;
1178 return;
1179 } else {
1180 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1181 ("Schedule TxPowerTracking direct call!!\n"));
1182 rtl92c_dm_txpower_tracking_directcall(hw);
1183 tm_trigger = 0;
1184 }
1185}
1186
/* Public watchdog hook for TX power tracking (thermal-meter method). */
void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
{
	rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
}
1191
1192void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1193{
1194 struct rtl_priv *rtlpriv = rtl_priv(hw);
1195 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1196
1197 p_ra->ratr_state = DM_RATR_STA_INIT;
1198 p_ra->pre_ratr_state = DM_RATR_STA_INIT;
1199
1200 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
1201 rtlpriv->dm.b_useramask = true;
1202 else
1203 rtlpriv->dm.b_useramask = false;
1204
1205}
1206
1207static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1208{
1209 struct rtl_priv *rtlpriv = rtl_priv(hw);
1210 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1211 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1212 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1213 u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
1214
1215 if (is_hal_stop(rtlhal)) {
1216 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1217 ("<---- driver is going to unload\n"));
1218 return;
1219 }
1220
1221 if (!rtlpriv->dm.b_useramask) {
1222 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1223 ("<---- driver does not control rate adaptive mask\n"));
1224 return;
1225 }
1226
1227 if (mac->link_state == MAC80211_LINKED) {
1228
1229 switch (p_ra->pre_ratr_state) {
1230 case DM_RATR_STA_HIGH:
1231 high_rssithresh_for_ra = 50;
1232 low_rssithresh_for_ra = 20;
1233 break;
1234 case DM_RATR_STA_MIDDLE:
1235 high_rssithresh_for_ra = 55;
1236 low_rssithresh_for_ra = 20;
1237 break;
1238 case DM_RATR_STA_LOW:
1239 high_rssithresh_for_ra = 50;
1240 low_rssithresh_for_ra = 25;
1241 break;
1242 default:
1243 high_rssithresh_for_ra = 50;
1244 low_rssithresh_for_ra = 20;
1245 break;
1246 }
1247
1248 if (rtlpriv->dm.undecorated_smoothed_pwdb >
1249 (long)high_rssithresh_for_ra)
1250 p_ra->ratr_state = DM_RATR_STA_HIGH;
1251 else if (rtlpriv->dm.undecorated_smoothed_pwdb >
1252 (long)low_rssithresh_for_ra)
1253 p_ra->ratr_state = DM_RATR_STA_MIDDLE;
1254 else
1255 p_ra->ratr_state = DM_RATR_STA_LOW;
1256
1257 if (p_ra->pre_ratr_state != p_ra->ratr_state) {
1258 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1259 ("RSSI = %ld\n",
1260 rtlpriv->dm.undecorated_smoothed_pwdb));
1261 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1262 ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
1263 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1264 ("PreState = %d, CurState = %d\n",
1265 p_ra->pre_ratr_state, p_ra->ratr_state));
1266
1267 rtlpriv->cfg->ops->update_rate_mask(hw,
1268 p_ra->ratr_state);
1269
1270 p_ra->pre_ratr_state = p_ra->ratr_state;
1271 }
1272 }
1273}
1274
1275static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1276{
1277 dm_pstable.pre_ccastate = CCA_MAX;
1278 dm_pstable.cur_ccasate = CCA_MAX;
1279 dm_pstable.pre_rfstate = RF_MAX;
1280 dm_pstable.cur_rfstate = RF_MAX;
1281 dm_pstable.rssi_val_min = 0;
1282}
1283
/*
 * Dynamic 1R CCA: switch CCA between one-receiver and two-receiver mode
 * based on minimum RSSI, with 30/35 hysteresis, to save power when the
 * signal is strong.  Only called for 92C-series (2T2R-capable) parts.
 */
static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	/* rssi_val_min == 0 means "no sample yet" — make no decision. */
	if (dm_pstable.rssi_val_min != 0) {
		if (dm_pstable.pre_ccastate == CCA_2R) {
			/* Hysteresis: need >= 35 to drop to one receiver. */
			if (dm_pstable.rssi_val_min >= 35)
				dm_pstable.cur_ccasate = CCA_1R;
			else
				dm_pstable.cur_ccasate = CCA_2R;
		} else {
			/* ... and <= 30 to go back to two receivers. */
			if (dm_pstable.rssi_val_min <= 30)
				dm_pstable.cur_ccasate = CCA_2R;
			else
				dm_pstable.cur_ccasate = CCA_1R;
		}
	} else {
		dm_pstable.cur_ccasate = CCA_MAX;
	}

	/* Reprogram the RX path enable bits only on a state change. */
	if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
		if (dm_pstable.cur_ccasate == CCA_1R) {
			if (get_rf_type(rtlphy) == RF_2T2R) {
				rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
					      MASKBYTE0, 0x13);
				rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
			} else {
				rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
					      MASKBYTE0, 0x23);
				rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
			}
		} else {
			/* CCA_2R (or CCA_MAX): restore both RX paths. */
			rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
				      0x33);
			rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
		}
		dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
	}

	/* NOTE(review): the first RT_TRACE argument after rtlpriv is
	 * normally a COMP_* component mask, not DBG_LOUD — looks like a
	 * wrong component here; confirm against the RT_TRACE macro. */
	RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
					       (dm_pstable.cur_ccasate ==
						0) ? "1RCCA" : "2RCCA"));
}
1328
/*
 * Dynamic RF power saving: enter a reduced-power RF state when minimum
 * RSSI is high (>= 30), return to normal when it drops (<= 25), or force
 * normal when @bforce_in_normal is set.
 *
 * The original register fields are captured once into function-local
 * statics so they can be restored when leaving the save state.
 * NOTE(review): once-only static capture means the values are shared
 * across all adapters and never re-read — confirm single-device use.
 */
void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
	static u8 initialize;
	static u32 reg_874, reg_c70, reg_85c, reg_a74;

	/* First call: snapshot the default register fields for restore. */
	if (initialize == 0) {
		reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
					 MASKDWORD) & 0x1CC000) >> 14;

		reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
					 MASKDWORD) & BIT(3)) >> 3;

		reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
					 MASKDWORD) & 0xFF000000) >> 24;

		reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;

		initialize = 1;
	}

	/* Pick the target RF state (with 25/30 RSSI hysteresis). */
	if (!bforce_in_normal) {
		if (dm_pstable.rssi_val_min != 0) {
			if (dm_pstable.pre_rfstate == RF_NORMAL) {
				if (dm_pstable.rssi_val_min >= 30)
					dm_pstable.cur_rfstate = RF_SAVE;
				else
					dm_pstable.cur_rfstate = RF_NORMAL;
			} else {
				if (dm_pstable.rssi_val_min <= 25)
					dm_pstable.cur_rfstate = RF_NORMAL;
				else
					dm_pstable.cur_rfstate = RF_SAVE;
			}
		} else {
			/* No RSSI sample yet: undecided. */
			dm_pstable.cur_rfstate = RF_MAX;
		}
	} else {
		dm_pstable.cur_rfstate = RF_NORMAL;
	}

	/* Program the hardware only on a state transition. */
	if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
		if (dm_pstable.cur_rfstate == RF_SAVE) {
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1C0000, 0x2);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				      0xFF000000, 0x63);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0xC000, 0x2);
			rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
			/* Toggle bit 28 of 0x818 to latch the new setting. */
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
		} else {
			/* Restore the values captured at first call. */
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1CC000, reg_874);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
				      reg_c70);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
				      reg_85c);
			rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
		}

		dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
	}
}
1395
/*
 * Collect the minimum-RSSI input for the BB power-saving logic from the
 * appropriate source (own link vs associated-entry minimum), then run
 * dynamic 1R CCA on 92C-series parts.
 *
 * NOTE(review): the RT_TRACE calls below pass DBG_LOUD where a COMP_*
 * component mask is normally expected — confirm against the macro.
 */
static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	/* Not linked and no per-entry PWDB: clear the RSSI sample. */
	if (((mac->link_state == MAC80211_NOLINK)) &&
	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
		dm_pstable.rssi_val_min = 0;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 ("Not connected to any\n"));
	}

	if (mac->link_state == MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			/* IBSS: use the weakest peer entry. */
			dm_pstable.rssi_val_min =
			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 ("AP Client PWDB = 0x%lx\n",
				  dm_pstable.rssi_val_min));
		} else {
			/* STA: use our own smoothed PWDB. */
			dm_pstable.rssi_val_min =
			    rtlpriv->dm.undecorated_smoothed_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 ("STA Default Port PWDB = 0x%lx\n",
				  dm_pstable.rssi_val_min));
		}
	} else {
		dm_pstable.rssi_val_min =
		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;

		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 ("AP Ext Port PWDB = 0x%lx\n",
			  dm_pstable.rssi_val_min));
	}

	/* 1R CCA is only applicable to the 2T2R-capable 92C series. */
	if (IS_92C_SERIAL(rtlhal->version))
		rtl92c_dm_1r_cca(hw);
}
1435
/*
 * Initialize all dynamic-management (DM) sub-modules.  All mechanisms are
 * driver-controlled (DM_TYPE_BYDRIVER) on this chip; the call order below
 * mirrors the watchdog order and is kept as-is.
 */
void rtl92c_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtl92c_dm_diginit(hw);
	rtl92c_dm_init_dynamic_txpower(hw);
	rtl92c_dm_init_edca_turbo(hw);
	rtl92c_dm_init_rate_adaptive_mask(hw);
	rtl92c_dm_initialize_txpower_tracking(hw);
	rtl92c_dm_init_dynamic_bb_powersaving(hw);
}
1448
1449void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1450{
1451 struct rtl_priv *rtlpriv = rtl_priv(hw);
1452 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1453 bool b_fw_current_inpsmode = false;
1454 bool b_fw_ps_awake = true;
1455
1456 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
1457 (u8 *) (&b_fw_current_inpsmode));
1458 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
1459 (u8 *) (&b_fw_ps_awake));
1460
1461 if ((ppsc->rfpwr_state == ERFON) && ((!b_fw_current_inpsmode) &&
1462 b_fw_ps_awake)
1463 && (!ppsc->rfchange_inprogress)) {
1464 rtl92c_dm_pwdb_monitor(hw);
1465 rtl92c_dm_dig(hw);
1466 rtl92c_dm_false_alarm_counter_statistics(hw);
1467 rtl92c_dm_dynamic_bb_powersaving(hw);
1468 rtl92c_dm_dynamic_txpower(hw);
1469 rtl92c_dm_check_txpower_tracking(hw);
1470 rtl92c_dm_refresh_rate_adaptive_mask(hw);
1471 rtl92c_dm_check_edca_turbo(hw);
1472 }
1473}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 463439e4074c..36302ebae4a3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -192,5 +192,6 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw); 192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); 193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); 194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
195void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw);
195 196
196#endif 197#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 1c41a0c93506..05477f465a75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -37,7 +37,6 @@
37#include "def.h" 37#include "def.h"
38#include "phy.h" 38#include "phy.h"
39#include "dm.h" 39#include "dm.h"
40#include "fw.h"
41#include "led.h" 40#include "led.h"
42#include "hw.h" 41#include "hw.h"
43 42
@@ -124,7 +123,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
124 break; 123 break;
125 } 124 }
126 case HW_VAR_FW_PSMODE_STATUS: 125 case HW_VAR_FW_PSMODE_STATUS:
127 *((bool *) (val)) = ppsc->b_fw_current_inpsmode; 126 *((bool *) (val)) = ppsc->fw_current_inpsmode;
128 break; 127 break;
129 case HW_VAR_CORRECT_TSF:{ 128 case HW_VAR_CORRECT_TSF:{
130 u64 tsf; 129 u64 tsf;
@@ -173,15 +172,15 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
173 break; 172 break;
174 } 173 }
175 case HW_VAR_BASIC_RATE:{ 174 case HW_VAR_BASIC_RATE:{
176 u16 b_rate_cfg = ((u16 *) val)[0]; 175 u16 rate_cfg = ((u16 *) val)[0];
177 u8 rate_index = 0; 176 u8 rate_index = 0;
178 b_rate_cfg = b_rate_cfg & 0x15f; 177 rate_cfg &= 0x15f;
179 b_rate_cfg |= 0x01; 178 rate_cfg |= 0x01;
180 rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff); 179 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
181 rtl_write_byte(rtlpriv, REG_RRSR + 1, 180 rtl_write_byte(rtlpriv, REG_RRSR + 1,
182 (b_rate_cfg >> 8)&0xff); 181 (rate_cfg >> 8)&0xff);
183 while (b_rate_cfg > 0x1) { 182 while (rate_cfg > 0x1) {
184 b_rate_cfg = (b_rate_cfg >> 1); 183 rate_cfg = (rate_cfg >> 1);
185 rate_index++; 184 rate_index++;
186 } 185 }
187 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 186 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
@@ -318,15 +317,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
318 } 317 }
319 case HW_VAR_AC_PARAM:{ 318 case HW_VAR_AC_PARAM:{
320 u8 e_aci = *((u8 *) val); 319 u8 e_aci = *((u8 *) val);
321 u32 u4b_ac_param = 0; 320 u32 u4b_ac_param;
321 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
322 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
323 u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
322 324
323 u4b_ac_param |= (u32) mac->ac[e_aci].aifs; 325 u4b_ac_param = (u32) mac->ac[e_aci].aifs;
324 u4b_ac_param |= ((u32) mac->ac[e_aci].cw_min 326 u4b_ac_param |= ((u32)cw_min
325 & 0xF) << AC_PARAM_ECW_MIN_OFFSET; 327 & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
326 u4b_ac_param |= ((u32) mac->ac[e_aci].cw_max & 328 u4b_ac_param |= ((u32)cw_max &
327 0xF) << AC_PARAM_ECW_MAX_OFFSET; 329 0xF) << AC_PARAM_ECW_MAX_OFFSET;
328 u4b_ac_param |= (u32) mac->ac[e_aci].tx_op 330 u4b_ac_param |= (u32)tx_op << AC_PARAM_TXOP_OFFSET;
329 << AC_PARAM_TXOP_LIMIT_OFFSET;
330 331
331 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, 332 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
332 ("queue:%x, ac_param:%x\n", e_aci, 333 ("queue:%x, ac_param:%x\n", e_aci,
@@ -469,12 +470,12 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
469 break; 470 break;
470 } 471 }
471 case HW_VAR_FW_PSMODE_STATUS: 472 case HW_VAR_FW_PSMODE_STATUS:
472 ppsc->b_fw_current_inpsmode = *((bool *) val); 473 ppsc->fw_current_inpsmode = *((bool *) val);
473 break; 474 break;
474 case HW_VAR_H2C_FW_JOINBSSRPT:{ 475 case HW_VAR_H2C_FW_JOINBSSRPT:{
475 u8 mstatus = (*(u8 *) val); 476 u8 mstatus = (*(u8 *) val);
476 u8 tmp_regcr, tmp_reg422; 477 u8 tmp_regcr, tmp_reg422;
477 bool b_recover = false; 478 bool recover = false;
478 479
479 if (mstatus == RT_MEDIA_CONNECT) { 480 if (mstatus == RT_MEDIA_CONNECT) {
480 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, 481 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
@@ -491,7 +492,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
491 rtl_read_byte(rtlpriv, 492 rtl_read_byte(rtlpriv,
492 REG_FWHW_TXQ_CTRL + 2); 493 REG_FWHW_TXQ_CTRL + 2);
493 if (tmp_reg422 & BIT(6)) 494 if (tmp_reg422 & BIT(6))
494 b_recover = true; 495 recover = true;
495 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, 496 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
496 tmp_reg422 & (~BIT(6))); 497 tmp_reg422 & (~BIT(6)));
497 498
@@ -500,7 +501,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
500 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); 501 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
501 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); 502 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
502 503
503 if (b_recover) { 504 if (recover) {
504 rtl_write_byte(rtlpriv, 505 rtl_write_byte(rtlpriv,
505 REG_FWHW_TXQ_CTRL + 2, 506 REG_FWHW_TXQ_CTRL + 2,
506 tmp_reg422); 507 tmp_reg422);
@@ -868,7 +869,7 @@ static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw)
868 rtl_write_word(rtlpriv, 0x350, 0x870c); 869 rtl_write_word(rtlpriv, 0x350, 0x870c);
869 rtl_write_byte(rtlpriv, 0x352, 0x1); 870 rtl_write_byte(rtlpriv, 0x352, 0x1);
870 871
871 if (ppsc->b_support_backdoor) 872 if (ppsc->support_backdoor)
872 rtl_write_byte(rtlpriv, 0x349, 0x1b); 873 rtl_write_byte(rtlpriv, 0x349, 0x1b);
873 else 874 else
874 rtl_write_byte(rtlpriv, 0x349, 0x03); 875 rtl_write_byte(rtlpriv, 0x349, 0x03);
@@ -940,15 +941,15 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
940 ("Failed to download FW. Init HW " 941 ("Failed to download FW. Init HW "
941 "without FW now..\n")); 942 "without FW now..\n"));
942 err = 1; 943 err = 1;
943 rtlhal->bfw_ready = false; 944 rtlhal->fw_ready = false;
944 return err; 945 return err;
945 } else { 946 } else {
946 rtlhal->bfw_ready = true; 947 rtlhal->fw_ready = true;
947 } 948 }
948 949
949 rtlhal->last_hmeboxnum = 0; 950 rtlhal->last_hmeboxnum = 0;
950 rtl92c_phy_mac_config(hw); 951 rtl92ce_phy_mac_config(hw);
951 rtl92c_phy_bb_config(hw); 952 rtl92ce_phy_bb_config(hw);
952 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; 953 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
953 rtl92c_phy_rf_config(hw); 954 rtl92c_phy_rf_config(hw);
954 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, 955 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
@@ -1170,21 +1171,20 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
1170{ 1171{
1171 struct rtl_priv *rtlpriv = rtl_priv(hw); 1172 struct rtl_priv *rtlpriv = rtl_priv(hw);
1172 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1173 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1173
1174 u32 u4b_ac_param; 1174 u32 u4b_ac_param;
1175 u16 cw_min = le16_to_cpu(mac->ac[aci].cw_min);
1176 u16 cw_max = le16_to_cpu(mac->ac[aci].cw_max);
1177 u16 tx_op = le16_to_cpu(mac->ac[aci].tx_op);
1175 1178
1176 rtl92c_dm_init_edca_turbo(hw); 1179 rtl92c_dm_init_edca_turbo(hw);
1177
1178 u4b_ac_param = (u32) mac->ac[aci].aifs; 1180 u4b_ac_param = (u32) mac->ac[aci].aifs;
1179 u4b_ac_param |= 1181 u4b_ac_param |= (u32) ((cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET);
1180 ((u32) mac->ac[aci].cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET; 1182 u4b_ac_param |= (u32) ((cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET);
1181 u4b_ac_param |= 1183 u4b_ac_param |= (u32) (tx_op << AC_PARAM_TXOP_OFFSET);
1182 ((u32) mac->ac[aci].cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET;
1183 u4b_ac_param |= (u32) mac->ac[aci].tx_op << AC_PARAM_TXOP_LIMIT_OFFSET;
1184 RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG, 1184 RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG,
1185 ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n", 1185 ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n",
1186 aci, u4b_ac_param, mac->ac[aci].aifs, mac->ac[aci].cw_min, 1186 aci, u4b_ac_param, mac->ac[aci].aifs, cw_min,
1187 mac->ac[aci].cw_max, mac->ac[aci].tx_op)); 1187 cw_max, tx_op));
1188 switch (aci) { 1188 switch (aci) {
1189 case AC1_BK: 1189 case AC1_BK:
1190 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param); 1190 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
@@ -1237,7 +1237,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
1237 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); 1237 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
1238 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); 1238 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
1239 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0); 1239 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
1240 if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->bfw_ready) 1240 if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
1241 rtl92c_firmware_selfreset(hw); 1241 rtl92c_firmware_selfreset(hw);
1242 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51); 1242 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
1243 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); 1243 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
@@ -1335,19 +1335,6 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
1335 rtl92ce_enable_interrupt(hw); 1335 rtl92ce_enable_interrupt(hw);
1336} 1336}
1337 1337
1338static u8 _rtl92c_get_chnl_group(u8 chnl)
1339{
1340 u8 group;
1341
1342 if (chnl < 3)
1343 group = 0;
1344 else if (chnl < 9)
1345 group = 1;
1346 else
1347 group = 2;
1348 return group;
1349}
1350
1351static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, 1338static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1352 bool autoload_fail, 1339 bool autoload_fail,
1353 u8 *hwinfo) 1340 u8 *hwinfo)
@@ -1568,7 +1555,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1568 rtlefuse->eeprom_thermalmeter = (tempval & 0x1f); 1555 rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
1569 1556
1570 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail) 1557 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
1571 rtlefuse->b_apk_thermalmeterignore = true; 1558 rtlefuse->apk_thermalmeterignore = true;
1572 1559
1573 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; 1560 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
1574 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1561 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
@@ -1625,7 +1612,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1625 1612
1626 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1613 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
1627 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; 1614 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1628 rtlefuse->b_txpwr_fromeprom = true; 1615 rtlefuse->txpwr_fromeprom = true;
1629 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 1616 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
1630 1617
1631 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1618 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -1668,7 +1655,7 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
1668 1655
1669 switch (rtlhal->oem_id) { 1656 switch (rtlhal->oem_id) {
1670 case RT_CID_819x_HP: 1657 case RT_CID_819x_HP:
1671 pcipriv->ledctl.bled_opendrain = true; 1658 pcipriv->ledctl.led_opendrain = true;
1672 break; 1659 break;
1673 case RT_CID_819x_Lenovo: 1660 case RT_CID_819x_Lenovo:
1674 case RT_CID_DEFAULT: 1661 case RT_CID_DEFAULT:
@@ -1693,10 +1680,10 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
1693 1680
1694 rtlhal->version = _rtl92ce_read_chip_version(hw); 1681 rtlhal->version = _rtl92ce_read_chip_version(hw);
1695 if (get_rf_type(rtlphy) == RF_1T1R) 1682 if (get_rf_type(rtlphy) == RF_1T1R)
1696 rtlpriv->dm.brfpath_rxenable[0] = true; 1683 rtlpriv->dm.rfpath_rxenable[0] = true;
1697 else 1684 else
1698 rtlpriv->dm.brfpath_rxenable[0] = 1685 rtlpriv->dm.rfpath_rxenable[0] =
1699 rtlpriv->dm.brfpath_rxenable[1] = true; 1686 rtlpriv->dm.rfpath_rxenable[1] = true;
1700 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n", 1687 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
1701 rtlhal->version)); 1688 rtlhal->version));
1702 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); 1689 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
@@ -1725,18 +1712,18 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1725 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1712 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1726 1713
1727 u32 ratr_value = (u32) mac->basic_rates; 1714 u32 ratr_value = (u32) mac->basic_rates;
1728 u8 *p_mcsrate = mac->mcs; 1715 u8 *mcsrate = mac->mcs;
1729 u8 ratr_index = 0; 1716 u8 ratr_index = 0;
1730 u8 b_nmode = mac->ht_enable; 1717 u8 nmode = mac->ht_enable;
1731 u8 mimo_ps = 1; 1718 u8 mimo_ps = 1;
1732 u16 shortgi_rate; 1719 u16 shortgi_rate;
1733 u32 tmp_ratr_value; 1720 u32 tmp_ratr_value;
1734 u8 b_curtxbw_40mhz = mac->bw_40; 1721 u8 curtxbw_40mhz = mac->bw_40;
1735 u8 b_curshortgi_40mhz = mac->sgi_40; 1722 u8 curshortgi_40mhz = mac->sgi_40;
1736 u8 b_curshortgi_20mhz = mac->sgi_20; 1723 u8 curshortgi_20mhz = mac->sgi_20;
1737 enum wireless_mode wirelessmode = mac->mode; 1724 enum wireless_mode wirelessmode = mac->mode;
1738 1725
1739 ratr_value |= EF2BYTE((*(u16 *) (p_mcsrate))) << 12; 1726 ratr_value |= ((*(u16 *) (mcsrate))) << 12;
1740 1727
1741 switch (wirelessmode) { 1728 switch (wirelessmode) {
1742 case WIRELESS_MODE_B: 1729 case WIRELESS_MODE_B:
@@ -1750,7 +1737,7 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1750 break; 1737 break;
1751 case WIRELESS_MODE_N_24G: 1738 case WIRELESS_MODE_N_24G:
1752 case WIRELESS_MODE_N_5G: 1739 case WIRELESS_MODE_N_5G:
1753 b_nmode = 1; 1740 nmode = 1;
1754 if (mimo_ps == 0) { 1741 if (mimo_ps == 0) {
1755 ratr_value &= 0x0007F005; 1742 ratr_value &= 0x0007F005;
1756 } else { 1743 } else {
@@ -1776,9 +1763,8 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1776 1763
1777 ratr_value &= 0x0FFFFFFF; 1764 ratr_value &= 0x0FFFFFFF;
1778 1765
1779 if (b_nmode && ((b_curtxbw_40mhz && 1766 if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz &&
1780 b_curshortgi_40mhz) || (!b_curtxbw_40mhz && 1767 curshortgi_20mhz))) {
1781 b_curshortgi_20mhz))) {
1782 1768
1783 ratr_value |= 0x10000000; 1769 ratr_value |= 0x10000000;
1784 tmp_ratr_value = (ratr_value >> 12); 1770 tmp_ratr_value = (ratr_value >> 12);
@@ -1806,11 +1792,11 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1806 u32 ratr_bitmap = (u32) mac->basic_rates; 1792 u32 ratr_bitmap = (u32) mac->basic_rates;
1807 u8 *p_mcsrate = mac->mcs; 1793 u8 *p_mcsrate = mac->mcs;
1808 u8 ratr_index; 1794 u8 ratr_index;
1809 u8 b_curtxbw_40mhz = mac->bw_40; 1795 u8 curtxbw_40mhz = mac->bw_40;
1810 u8 b_curshortgi_40mhz = mac->sgi_40; 1796 u8 curshortgi_40mhz = mac->sgi_40;
1811 u8 b_curshortgi_20mhz = mac->sgi_20; 1797 u8 curshortgi_20mhz = mac->sgi_20;
1812 enum wireless_mode wirelessmode = mac->mode; 1798 enum wireless_mode wirelessmode = mac->mode;
1813 bool b_shortgi = false; 1799 bool shortgi = false;
1814 u8 rate_mask[5]; 1800 u8 rate_mask[5];
1815 u8 macid = 0; 1801 u8 macid = 0;
1816 u8 mimops = 1; 1802 u8 mimops = 1;
@@ -1852,7 +1838,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1852 } else { 1838 } else {
1853 if (rtlphy->rf_type == RF_1T2R || 1839 if (rtlphy->rf_type == RF_1T2R ||
1854 rtlphy->rf_type == RF_1T1R) { 1840 rtlphy->rf_type == RF_1T1R) {
1855 if (b_curtxbw_40mhz) { 1841 if (curtxbw_40mhz) {
1856 if (rssi_level == 1) 1842 if (rssi_level == 1)
1857 ratr_bitmap &= 0x000f0000; 1843 ratr_bitmap &= 0x000f0000;
1858 else if (rssi_level == 2) 1844 else if (rssi_level == 2)
@@ -1868,7 +1854,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1868 ratr_bitmap &= 0x000ff005; 1854 ratr_bitmap &= 0x000ff005;
1869 } 1855 }
1870 } else { 1856 } else {
1871 if (b_curtxbw_40mhz) { 1857 if (curtxbw_40mhz) {
1872 if (rssi_level == 1) 1858 if (rssi_level == 1)
1873 ratr_bitmap &= 0x0f0f0000; 1859 ratr_bitmap &= 0x0f0f0000;
1874 else if (rssi_level == 2) 1860 else if (rssi_level == 2)
@@ -1886,13 +1872,13 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1886 } 1872 }
1887 } 1873 }
1888 1874
1889 if ((b_curtxbw_40mhz && b_curshortgi_40mhz) || 1875 if ((curtxbw_40mhz && curshortgi_40mhz) ||
1890 (!b_curtxbw_40mhz && b_curshortgi_20mhz)) { 1876 (!curtxbw_40mhz && curshortgi_20mhz)) {
1891 1877
1892 if (macid == 0) 1878 if (macid == 0)
1893 b_shortgi = true; 1879 shortgi = true;
1894 else if (macid == 1) 1880 else if (macid == 1)
1895 b_shortgi = false; 1881 shortgi = false;
1896 } 1882 }
1897 break; 1883 break;
1898 default: 1884 default:
@@ -1906,9 +1892,9 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1906 } 1892 }
1907 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1893 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1908 ("ratr_bitmap :%x\n", ratr_bitmap)); 1894 ("ratr_bitmap :%x\n", ratr_bitmap));
1909 *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | 1895 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
1910 (ratr_index << 28)); 1896 (ratr_index << 28);
1911 rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80; 1897 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
1912 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, " 1898 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
1913 "ratr_val:%x, %x:%x:%x:%x:%x\n", 1899 "ratr_val:%x, %x:%x:%x:%x:%x\n",
1914 ratr_index, ratr_bitmap, 1900 ratr_index, ratr_bitmap,
@@ -1940,13 +1926,13 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
1940 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1926 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1941 enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; 1927 enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
1942 u8 u1tmp; 1928 u8 u1tmp;
1943 bool b_actuallyset = false; 1929 bool actuallyset = false;
1944 unsigned long flag; 1930 unsigned long flag;
1945 1931
1946 if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter)) 1932 if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
1947 return false; 1933 return false;
1948 1934
1949 if (ppsc->b_swrf_processing) 1935 if (ppsc->swrf_processing)
1950 return false; 1936 return false;
1951 1937
1952 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); 1938 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
@@ -1972,24 +1958,24 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
1972 u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL); 1958 u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
1973 e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF; 1959 e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
1974 1960
1975 if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) { 1961 if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
1976 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 1962 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
1977 ("GPIOChangeRF - HW Radio ON, RF ON\n")); 1963 ("GPIOChangeRF - HW Radio ON, RF ON\n"));
1978 1964
1979 e_rfpowerstate_toset = ERFON; 1965 e_rfpowerstate_toset = ERFON;
1980 ppsc->b_hwradiooff = false; 1966 ppsc->hwradiooff = false;
1981 b_actuallyset = true; 1967 actuallyset = true;
1982 } else if ((ppsc->b_hwradiooff == false) 1968 } else if ((ppsc->hwradiooff == false)
1983 && (e_rfpowerstate_toset == ERFOFF)) { 1969 && (e_rfpowerstate_toset == ERFOFF)) {
1984 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 1970 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
1985 ("GPIOChangeRF - HW Radio OFF, RF OFF\n")); 1971 ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
1986 1972
1987 e_rfpowerstate_toset = ERFOFF; 1973 e_rfpowerstate_toset = ERFOFF;
1988 ppsc->b_hwradiooff = true; 1974 ppsc->hwradiooff = true;
1989 b_actuallyset = true; 1975 actuallyset = true;
1990 } 1976 }
1991 1977
1992 if (b_actuallyset) { 1978 if (actuallyset) {
1993 if (e_rfpowerstate_toset == ERFON) { 1979 if (e_rfpowerstate_toset == ERFON) {
1994 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && 1980 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
1995 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) { 1981 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
@@ -2028,7 +2014,7 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
2028 } 2014 }
2029 2015
2030 *valid = 1; 2016 *valid = 1;
2031 return !ppsc->b_hwradiooff; 2017 return !ppsc->hwradiooff;
2032 2018
2033} 2019}
2034 2020
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 305c819c8c78..a3dfdb635168 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -30,6 +30,8 @@
30#ifndef __RTL92CE_HW_H__ 30#ifndef __RTL92CE_HW_H__
31#define __RTL92CE_HW_H__ 31#define __RTL92CE_HW_H__
32 32
33#define H2C_RA_MASK 6
34
33void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 35void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
34void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw); 36void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
35void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw, 37void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
@@ -53,5 +55,14 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
53void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, 55void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
54 u8 *p_macaddr, bool is_group, u8 enc_algo, 56 u8 *p_macaddr, bool is_group, u8 enc_algo,
55 bool is_wepkey, bool clear_all); 57 bool is_wepkey, bool clear_all);
58bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
59void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
60void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
61void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
62int rtl92c_download_fw(struct ieee80211_hw *hw);
63void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
64void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
65 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
66bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw);
56 67
57#endif 68#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
index 78a0569208ea..7b1da8d7508f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
@@ -57,7 +57,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
57 ("switch case not process\n")); 57 ("switch case not process\n"));
58 break; 58 break;
59 } 59 }
60 pled->b_ledon = true; 60 pled->ledon = true;
61} 61}
62 62
63void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) 63void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
@@ -76,7 +76,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
76 break; 76 break;
77 case LED_PIN_LED0: 77 case LED_PIN_LED0:
78 ledcfg &= 0xf0; 78 ledcfg &= 0xf0;
79 if (pcipriv->ledctl.bled_opendrain == true) 79 if (pcipriv->ledctl.led_opendrain == true)
80 rtl_write_byte(rtlpriv, REG_LEDCFG2, 80 rtl_write_byte(rtlpriv, REG_LEDCFG2,
81 (ledcfg | BIT(1) | BIT(5) | BIT(6))); 81 (ledcfg | BIT(1) | BIT(5) | BIT(6)));
82 else 82 else
@@ -92,7 +92,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
92 ("switch case not process\n")); 92 ("switch case not process\n"));
93 break; 93 break;
94 } 94 }
95 pled->b_ledon = false; 95 pled->ledon = false;
96} 96}
97 97
98void rtl92ce_init_sw_leds(struct ieee80211_hw *hw) 98void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 45044117139a..d0541e8c6012 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -32,89 +32,13 @@
32#include "../ps.h" 32#include "../ps.h"
33#include "reg.h" 33#include "reg.h"
34#include "def.h" 34#include "def.h"
35#include "hw.h"
35#include "phy.h" 36#include "phy.h"
36#include "rf.h" 37#include "rf.h"
37#include "dm.h" 38#include "dm.h"
38#include "table.h" 39#include "table.h"
39 40
40static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, 41u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
41 enum radio_path rfpath, u32 offset);
42static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
43 enum radio_path rfpath, u32 offset,
44 u32 data);
45static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
46 enum radio_path rfpath, u32 offset);
47static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
48 enum radio_path rfpath, u32 offset,
49 u32 data);
50static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
51static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
52static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
53static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
54 u8 configtype);
55static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
56 u8 configtype);
57static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
58static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
59 u32 cmdtableidx, u32 cmdtablesz,
60 enum swchnlcmd_id cmdid, u32 para1,
61 u32 para2, u32 msdelay);
62static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
63 u8 channel, u8 *stage, u8 *step,
64 u32 *delay);
65static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
66 enum wireless_mode wirelessmode,
67 long power_indbm);
68static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
69 enum radio_path rfpath);
70static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
71 enum wireless_mode wirelessmode,
72 u8 txpwridx);
73u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
74{
75 struct rtl_priv *rtlpriv = rtl_priv(hw);
76 u32 returnvalue, originalvalue, bitshift;
77
78 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
79 "bitmask(%#x)\n", regaddr,
80 bitmask));
81 originalvalue = rtl_read_dword(rtlpriv, regaddr);
82 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
83 returnvalue = (originalvalue & bitmask) >> bitshift;
84
85 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
86 "Addr[0x%x]=0x%x\n", bitmask,
87 regaddr, originalvalue));
88
89 return returnvalue;
90
91}
92
93void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
94 u32 regaddr, u32 bitmask, u32 data)
95{
96 struct rtl_priv *rtlpriv = rtl_priv(hw);
97 u32 originalvalue, bitshift;
98
99 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
100 " data(%#x)\n", regaddr, bitmask,
101 data));
102
103 if (bitmask != MASKDWORD) {
104 originalvalue = rtl_read_dword(rtlpriv, regaddr);
105 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
106 data = ((originalvalue & (~bitmask)) | (data << bitshift));
107 }
108
109 rtl_write_dword(rtlpriv, regaddr, data);
110
111 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
112 " data(%#x)\n", regaddr, bitmask,
113 data));
114
115}
116
117u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
118 enum radio_path rfpath, u32 regaddr, u32 bitmask) 42 enum radio_path rfpath, u32 regaddr, u32 bitmask)
119{ 43{
120 struct rtl_priv *rtlpriv = rtl_priv(hw); 44 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -149,7 +73,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
149 return readback_value; 73 return readback_value;
150} 74}
151 75
152void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, 76void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
153 enum radio_path rfpath, 77 enum radio_path rfpath,
154 u32 regaddr, u32 bitmask, u32 data) 78 u32 regaddr, u32 bitmask, u32 data)
155{ 79{
@@ -197,137 +121,25 @@ void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
197 bitmask, data, rfpath)); 121 bitmask, data, rfpath));
198} 122}
199 123
200static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, 124bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw)
201 enum radio_path rfpath, u32 offset)
202{
203 RT_ASSERT(false, ("deprecated!\n"));
204 return 0;
205}
206
207static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
208 enum radio_path rfpath, u32 offset,
209 u32 data)
210{
211 RT_ASSERT(false, ("deprecated!\n"));
212}
213
214static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
215 enum radio_path rfpath, u32 offset)
216{
217 struct rtl_priv *rtlpriv = rtl_priv(hw);
218 struct rtl_phy *rtlphy = &(rtlpriv->phy);
219 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
220 u32 newoffset;
221 u32 tmplong, tmplong2;
222 u8 rfpi_enable = 0;
223 u32 retvalue;
224
225 offset &= 0x3f;
226 newoffset = offset;
227 if (RT_CANNOT_IO(hw)) {
228 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
229 return 0xFFFFFFFF;
230 }
231 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
232 if (rfpath == RF90_PATH_A)
233 tmplong2 = tmplong;
234 else
235 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
236 tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
237 (newoffset << 23) | BLSSIREADEDGE;
238 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
239 tmplong & (~BLSSIREADEDGE));
240 mdelay(1);
241 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
242 mdelay(1);
243 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
244 tmplong | BLSSIREADEDGE);
245 mdelay(1);
246 if (rfpath == RF90_PATH_A)
247 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
248 BIT(8));
249 else if (rfpath == RF90_PATH_B)
250 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
251 BIT(8));
252 if (rfpi_enable)
253 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
254 BLSSIREADBACKDATA);
255 else
256 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
257 BLSSIREADBACKDATA);
258 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
259 rfpath, pphyreg->rflssi_readback,
260 retvalue));
261 return retvalue;
262}
263
264static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
265 enum radio_path rfpath, u32 offset,
266 u32 data)
267{
268 u32 data_and_addr;
269 u32 newoffset;
270 struct rtl_priv *rtlpriv = rtl_priv(hw);
271 struct rtl_phy *rtlphy = &(rtlpriv->phy);
272 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
273
274 if (RT_CANNOT_IO(hw)) {
275 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
276 return;
277 }
278 offset &= 0x3f;
279 newoffset = offset;
280 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
281 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
282 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
283 rfpath, pphyreg->rf3wire_offset,
284 data_and_addr));
285}
286
287static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
288{
289 u32 i;
290
291 for (i = 0; i <= 31; i++) {
292 if (((bitmask >> i) & 0x1) == 1)
293 break;
294 }
295 return i;
296}
297
298static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
299{
300 rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
301 rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
302 rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
303 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
304 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
305 rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
306 rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
307 rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
308 rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
309 rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
310}
311
312bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
313{ 125{
314 struct rtl_priv *rtlpriv = rtl_priv(hw); 126 struct rtl_priv *rtlpriv = rtl_priv(hw);
315 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 127 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
316 bool is92c = IS_92C_SERIAL(rtlhal->version); 128 bool is92c = IS_92C_SERIAL(rtlhal->version);
317 bool rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw); 129 bool rtstatus = _rtl92ce_phy_config_mac_with_headerfile(hw);
318 130
319 if (is92c) 131 if (is92c)
320 rtl_write_byte(rtlpriv, 0x14, 0x71); 132 rtl_write_byte(rtlpriv, 0x14, 0x71);
321 return rtstatus; 133 return rtstatus;
322} 134}
323 135
324bool rtl92c_phy_bb_config(struct ieee80211_hw *hw) 136bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw)
325{ 137{
326 bool rtstatus = true; 138 bool rtstatus = true;
327 struct rtl_priv *rtlpriv = rtl_priv(hw); 139 struct rtl_priv *rtlpriv = rtl_priv(hw);
328 u16 regval; 140 u16 regval;
329 u32 regvaldw; 141 u32 regvaldw;
330 u8 b_reg_hwparafile = 1; 142 u8 reg_hwparafile = 1;
331 143
332 _rtl92c_phy_init_bb_rf_register_definition(hw); 144 _rtl92c_phy_init_bb_rf_register_definition(hw);
333 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); 145 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
@@ -342,56 +154,12 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
342 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); 154 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
343 regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0); 155 regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
344 rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23)); 156 rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
345 if (b_reg_hwparafile == 1) 157 if (reg_hwparafile == 1)
346 rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw); 158 rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
347 return rtstatus; 159 return rtstatus;
348} 160}
349 161
350bool rtl92c_phy_rf_config(struct ieee80211_hw *hw) 162bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
351{
352 return rtl92c_phy_rf6052_config(hw);
353}
354
355static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
356{
357 struct rtl_priv *rtlpriv = rtl_priv(hw);
358 struct rtl_phy *rtlphy = &(rtlpriv->phy);
359 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
360 bool rtstatus;
361
362 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
363 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
364 BASEBAND_CONFIG_PHY_REG);
365 if (rtstatus != true) {
366 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
367 return false;
368 }
369 if (rtlphy->rf_type == RF_1T2R) {
370 _rtl92c_phy_bb_config_1t(hw);
371 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
372 }
373 if (rtlefuse->autoload_failflag == false) {
374 rtlphy->pwrgroup_cnt = 0;
375 rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw,
376 BASEBAND_CONFIG_PHY_REG);
377 }
378 if (rtstatus != true) {
379 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
380 return false;
381 }
382 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
383 BASEBAND_CONFIG_AGC_TAB);
384 if (rtstatus != true) {
385 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
386 return false;
387 }
388 rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
389 RFPGA0_XA_HSSIPARAMETER2,
390 0x200));
391 return true;
392}
393
394static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
395{ 163{
396 struct rtl_priv *rtlpriv = rtl_priv(hw); 164 struct rtl_priv *rtlpriv = rtl_priv(hw);
397 u32 i; 165 u32 i;
@@ -408,11 +176,7 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
408 return true; 176 return true;
409} 177}
410 178
411void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw) 179bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
412{
413}
414
415static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
416 u8 configtype) 180 u8 configtype)
417{ 181{
418 int i; 182 int i;
@@ -456,7 +220,6 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
456 phy_regarray_table[i], 220 phy_regarray_table[i],
457 phy_regarray_table[i + 1])); 221 phy_regarray_table[i + 1]));
458 } 222 }
459 rtl92c_phy_config_bb_external_pa(hw);
460 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { 223 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
461 for (i = 0; i < agctab_arraylen; i = i + 2) { 224 for (i = 0; i < agctab_arraylen; i = i + 2) {
462 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD, 225 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
@@ -472,175 +235,7 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
472 return true; 235 return true;
473} 236}
474 237
475static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, 238bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
476 u32 regaddr, u32 bitmask,
477 u32 data)
478{
479 struct rtl_priv *rtlpriv = rtl_priv(hw);
480 struct rtl_phy *rtlphy = &(rtlpriv->phy);
481
482 if (regaddr == RTXAGC_A_RATE18_06) {
483 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
484 data;
485 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
486 ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
487 rtlphy->pwrgroup_cnt,
488 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
489 pwrgroup_cnt][0]));
490 }
491 if (regaddr == RTXAGC_A_RATE54_24) {
492 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
493 data;
494 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
495 ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
496 rtlphy->pwrgroup_cnt,
497 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
498 pwrgroup_cnt][1]));
499 }
500 if (regaddr == RTXAGC_A_CCK1_MCS32) {
501 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
502 data;
503 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
504 ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
505 rtlphy->pwrgroup_cnt,
506 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
507 pwrgroup_cnt][6]));
508 }
509 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
510 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
511 data;
512 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
513 ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
514 rtlphy->pwrgroup_cnt,
515 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
516 pwrgroup_cnt][7]));
517 }
518 if (regaddr == RTXAGC_A_MCS03_MCS00) {
519 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
520 data;
521 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
522 ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
523 rtlphy->pwrgroup_cnt,
524 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
525 pwrgroup_cnt][2]));
526 }
527 if (regaddr == RTXAGC_A_MCS07_MCS04) {
528 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
529 data;
530 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
531 ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
532 rtlphy->pwrgroup_cnt,
533 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
534 pwrgroup_cnt][3]));
535 }
536 if (regaddr == RTXAGC_A_MCS11_MCS08) {
537 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
538 data;
539 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
540 ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
541 rtlphy->pwrgroup_cnt,
542 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
543 pwrgroup_cnt][4]));
544 }
545 if (regaddr == RTXAGC_A_MCS15_MCS12) {
546 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
547 data;
548 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
549 ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
550 rtlphy->pwrgroup_cnt,
551 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
552 pwrgroup_cnt][5]));
553 }
554 if (regaddr == RTXAGC_B_RATE18_06) {
555 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
556 data;
557 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
558 ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
559 rtlphy->pwrgroup_cnt,
560 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
561 pwrgroup_cnt][8]));
562 }
563 if (regaddr == RTXAGC_B_RATE54_24) {
564 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
565 data;
566
567 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
568 ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
569 rtlphy->pwrgroup_cnt,
570 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
571 pwrgroup_cnt][9]));
572 }
573
574 if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
575 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
576 data;
577
578 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
579 ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
580 rtlphy->pwrgroup_cnt,
581 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
582 pwrgroup_cnt][14]));
583 }
584
585 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
586 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
587 data;
588
589 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
590 ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
591 rtlphy->pwrgroup_cnt,
592 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
593 pwrgroup_cnt][15]));
594 }
595
596 if (regaddr == RTXAGC_B_MCS03_MCS00) {
597 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
598 data;
599
600 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
601 ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
602 rtlphy->pwrgroup_cnt,
603 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
604 pwrgroup_cnt][10]));
605 }
606
607 if (regaddr == RTXAGC_B_MCS07_MCS04) {
608 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
609 data;
610
611 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
612 ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
613 rtlphy->pwrgroup_cnt,
614 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
615 pwrgroup_cnt][11]));
616 }
617
618 if (regaddr == RTXAGC_B_MCS11_MCS08) {
619 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
620 data;
621
622 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
623 ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
624 rtlphy->pwrgroup_cnt,
625 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
626 pwrgroup_cnt][12]));
627 }
628
629 if (regaddr == RTXAGC_B_MCS15_MCS12) {
630 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
631 data;
632
633 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
634 ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
635 rtlphy->pwrgroup_cnt,
636 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
637 pwrgroup_cnt][13]));
638
639 rtlphy->pwrgroup_cnt++;
640 }
641}
642
643static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
644 u8 configtype) 239 u8 configtype)
645{ 240{
646 struct rtl_priv *rtlpriv = rtl_priv(hw); 241 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -679,13 +274,7 @@ static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
679 return true; 274 return true;
680} 275}
681 276
682static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw, 277bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
683 enum radio_path rfpath)
684{
685 return true;
686}
687
688bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
689 enum radio_path rfpath) 278 enum radio_path rfpath)
690{ 279{
691 280
@@ -740,7 +329,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
740 udelay(1); 329 udelay(1);
741 } 330 }
742 } 331 }
743 _rtl92c_phy_config_rf_external_pa(hw, rfpath);
744 break; 332 break;
745 case RF90_PATH_B: 333 case RF90_PATH_B:
746 for (i = 0; i < radiob_arraylen; i = i + 2) { 334 for (i = 0; i < radiob_arraylen; i = i + 2) {
@@ -776,346 +364,7 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
776 return true; 364 return true;
777} 365}
778 366
779void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) 367void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
780{
781 struct rtl_priv *rtlpriv = rtl_priv(hw);
782 struct rtl_phy *rtlphy = &(rtlpriv->phy);
783
784 rtlphy->default_initialgain[0] =
785 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
786 rtlphy->default_initialgain[1] =
787 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
788 rtlphy->default_initialgain[2] =
789 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
790 rtlphy->default_initialgain[3] =
791 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
792
793 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
794 ("Default initial gain (c50=0x%x, "
795 "c58=0x%x, c60=0x%x, c68=0x%x\n",
796 rtlphy->default_initialgain[0],
797 rtlphy->default_initialgain[1],
798 rtlphy->default_initialgain[2],
799 rtlphy->default_initialgain[3]));
800
801 rtlphy->framesync = (u8) rtl_get_bbreg(hw,
802 ROFDM0_RXDETECTOR3, MASKBYTE0);
803 rtlphy->framesync_c34 = rtl_get_bbreg(hw,
804 ROFDM0_RXDETECTOR2, MASKDWORD);
805
806 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
807 ("Default framesync (0x%x) = 0x%x\n",
808 ROFDM0_RXDETECTOR3, rtlphy->framesync));
809}
810
/* Fill rtlphy->phyreg_def[] with the baseband register addresses used to
 * access each RF path (A-D).  Pure table initialization; no hardware I/O. */
static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	/* RF interface software control: paths A/B share one register,
	 * paths C/D the other. */
	rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;

	/* RF interface readback. */
	rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;

	/* RF interface output/enable: only paths A and B exist here. */
	rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;

	/* 3-wire serial write port to the RF chip. */
	rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
	    RFPGA0_XA_LSSIPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
	    RFPGA0_XB_LSSIPARAMETER;

	rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;

	/* TX gain stage: one shared register for all paths. */
	rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;

	/* HSSI (high-speed serial interface) parameter registers. */
	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;

	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;

	rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
	    RFPGA0_XAB_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
	    RFPGA0_XAB_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
	    RFPGA0_XCD_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
	    RFPGA0_XCD_SWITCHCONTROL;

	/* Per-path AGC core control. */
	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;

	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;

	/* RX IQ imbalance correction (note: the XC name below carries a
	 * historical typo, IMBANLANCE, from the register header). */
	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
	    ROFDM0_XARXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
	    ROFDM0_XBRXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
	    ROFDM0_XCRXIQIMBANLANCE;
	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
	    ROFDM0_XDRXIQIMBALANCE;

	/* RX analog front end. */
	rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
	rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
	rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
	rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;

	/* TX IQ imbalance correction. */
	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
	    ROFDM0_XATXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
	    ROFDM0_XBTXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
	    ROFDM0_XCTXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
	    ROFDM0_XDTXIQIMBALANCE;

	/* TX analog front end. */
	rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
	rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
	rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
	rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;

	/* LSSI readback ports for reading RF registers back. */
	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
	    RFPGA0_XA_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
	    RFPGA0_XB_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
	    RFPGA0_XC_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
	    RFPGA0_XD_LSSIREADBACK;

	/* HSPI readback, PI mode; only paths A and B. */
	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
	    TRANSCEIVEA_HSPI_READBACK;
	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
	    TRANSCEIVEB_HSPI_READBACK;

}
915
916void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
917{
918 struct rtl_priv *rtlpriv = rtl_priv(hw);
919 struct rtl_phy *rtlphy = &(rtlpriv->phy);
920 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
921 u8 txpwr_level;
922 long txpwr_dbm;
923
924 txpwr_level = rtlphy->cur_cck_txpwridx;
925 txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
926 WIRELESS_MODE_B, txpwr_level);
927 txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
928 rtlefuse->legacy_ht_txpowerdiff;
929 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
930 WIRELESS_MODE_G,
931 txpwr_level) > txpwr_dbm)
932 txpwr_dbm =
933 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
934 txpwr_level);
935 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
936 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
937 WIRELESS_MODE_N_24G,
938 txpwr_level) > txpwr_dbm)
939 txpwr_dbm =
940 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
941 txpwr_level);
942 *powerlevel = txpwr_dbm;
943}
944
945static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
946 u8 *cckpowerlevel, u8 *ofdmpowerlevel)
947{
948 struct rtl_priv *rtlpriv = rtl_priv(hw);
949 struct rtl_phy *rtlphy = &(rtlpriv->phy);
950 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
951 u8 index = (channel - 1);
952
953 cckpowerlevel[RF90_PATH_A] =
954 rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
955 cckpowerlevel[RF90_PATH_B] =
956 rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
957 if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
958 ofdmpowerlevel[RF90_PATH_A] =
959 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
960 ofdmpowerlevel[RF90_PATH_B] =
961 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
962 } else if (get_rf_type(rtlphy) == RF_2T2R) {
963 ofdmpowerlevel[RF90_PATH_A] =
964 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
965 ofdmpowerlevel[RF90_PATH_B] =
966 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
967 }
968}
969
970static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
971 u8 channel, u8 *cckpowerlevel,
972 u8 *ofdmpowerlevel)
973{
974 struct rtl_priv *rtlpriv = rtl_priv(hw);
975 struct rtl_phy *rtlphy = &(rtlpriv->phy);
976
977 rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
978 rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
979}
980
981void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
982{
983 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
984 u8 cckpowerlevel[2], ofdmpowerlevel[2];
985
986 if (rtlefuse->b_txpwr_fromeprom == false)
987 return;
988 _rtl92c_get_txpower_index(hw, channel,
989 &cckpowerlevel[0], &ofdmpowerlevel[0]);
990 _rtl92c_ccxpower_index_check(hw,
991 channel, &cckpowerlevel[0],
992 &ofdmpowerlevel[0]);
993 rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
994 rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
995}
996
997bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
998{
999 struct rtl_priv *rtlpriv = rtl_priv(hw);
1000 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1001 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1002 u8 idx;
1003 u8 rf_path;
1004
1005 u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
1006 WIRELESS_MODE_B,
1007 power_indbm);
1008 u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
1009 WIRELESS_MODE_N_24G,
1010 power_indbm);
1011 if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
1012 ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
1013 else
1014 ofdmtxpwridx = 0;
1015 RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
1016 ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
1017 power_indbm, ccktxpwridx, ofdmtxpwridx));
1018 for (idx = 0; idx < 14; idx++) {
1019 for (rf_path = 0; rf_path < 2; rf_path++) {
1020 rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
1021 rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
1022 ofdmtxpwridx;
1023 rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
1024 ofdmtxpwridx;
1025 }
1026 }
1027 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
1028 return true;
1029}
1030
/* Intentionally empty: this variant performs no extra hardware writes
 * when the beacon interval changes.  NOTE(review): kept (rather than
 * removed) presumably because it is wired into a callback table
 * elsewhere — confirm against the hal_ops definition. */
void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
{
}
1034
1035static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
1036 enum wireless_mode wirelessmode,
1037 long power_indbm)
1038{
1039 u8 txpwridx;
1040 long offset;
1041
1042 switch (wirelessmode) {
1043 case WIRELESS_MODE_B:
1044 offset = -7;
1045 break;
1046 case WIRELESS_MODE_G:
1047 case WIRELESS_MODE_N_24G:
1048 offset = -8;
1049 break;
1050 default:
1051 offset = -8;
1052 break;
1053 }
1054
1055 if ((power_indbm - offset) > 0)
1056 txpwridx = (u8) ((power_indbm - offset) * 2);
1057 else
1058 txpwridx = 0;
1059
1060 if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
1061 txpwridx = MAX_TXPWR_IDX_NMODE_92S;
1062
1063 return txpwridx;
1064}
1065
1066static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
1067 enum wireless_mode wirelessmode,
1068 u8 txpwridx)
1069{
1070 long offset;
1071 long pwrout_dbm;
1072
1073 switch (wirelessmode) {
1074 case WIRELESS_MODE_B:
1075 offset = -7;
1076 break;
1077 case WIRELESS_MODE_G:
1078 case WIRELESS_MODE_N_24G:
1079 offset = -8;
1080 break;
1081 default:
1082 offset = -8;
1083 break;
1084 }
1085 pwrout_dbm = txpwridx / 2 + offset;
1086 return pwrout_dbm;
1087}
1088
1089void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
1090{
1091 struct rtl_priv *rtlpriv = rtl_priv(hw);
1092 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1093 enum io_type iotype;
1094
1095 if (!is_hal_stop(rtlhal)) {
1096 switch (operation) {
1097 case SCAN_OPT_BACKUP:
1098 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
1099 rtlpriv->cfg->ops->set_hw_reg(hw,
1100 HW_VAR_IO_CMD,
1101 (u8 *)&iotype);
1102
1103 break;
1104 case SCAN_OPT_RESTORE:
1105 iotype = IO_CMD_RESUME_DM_BY_SCAN;
1106 rtlpriv->cfg->ops->set_hw_reg(hw,
1107 HW_VAR_IO_CMD,
1108 (u8 *)&iotype);
1109 break;
1110 default:
1111 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1112 ("Unknown Scan Backup operation.\n"));
1113 break;
1114 }
1115 }
1116}
1117
1118void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
1119{ 368{
1120 struct rtl_priv *rtlpriv = rtl_priv(hw); 369 struct rtl_priv *rtlpriv = rtl_priv(hw);
1121 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 370 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1183,645 +432,7 @@ void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
1183 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); 432 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
1184} 433}
1185 434
1186void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, 435void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
1187 enum nl80211_channel_type ch_type)
1188{
1189 struct rtl_priv *rtlpriv = rtl_priv(hw);
1190 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1192 u8 tmp_bw = rtlphy->current_chan_bw;
1193
1194 if (rtlphy->set_bwmode_inprogress)
1195 return;
1196 rtlphy->set_bwmode_inprogress = true;
1197 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
1198 rtl92c_phy_set_bw_mode_callback(hw);
1199 else {
1200 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1201 ("FALSE driver sleep or unload\n"));
1202 rtlphy->set_bwmode_inprogress = false;
1203 rtlphy->current_chan_bw = tmp_bw;
1204 }
1205}
1206
/* Drive the channel-switch state machine for the current channel.
 * Steps are executed by _rtl92c_phy_sw_chnl_step_by_step(), which keeps
 * its position in rtlphy->sw_chnl_stage/sw_chnl_step between calls. */
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u32 delay;

	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
		 ("switch to channel%d\n", rtlphy->current_channel));
	if (is_hal_stop(rtlhal))
		return;
	do {
		if (!rtlphy->sw_chnl_inprogress)
			break;
		/* step_by_step() returns false while steps remain.  A
		 * non-zero delay busy-waits with mdelay() and then
		 * leaves the loop (sw_chnl_inprogress stays set); a
		 * zero delay immediately runs the next step; a true
		 * return marks the whole switch complete. */
		if (!_rtl92c_phy_sw_chnl_step_by_step
		    (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
		     &rtlphy->sw_chnl_step, &delay)) {
			if (delay > 0)
				mdelay(delay);
			else
				continue;
		} else
			rtlphy->sw_chnl_inprogress = false;
		break;
	} while (true);
	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
}
1234
1235u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
1236{
1237 struct rtl_priv *rtlpriv = rtl_priv(hw);
1238 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1239 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1240
1241 if (rtlphy->sw_chnl_inprogress)
1242 return 0;
1243 if (rtlphy->set_bwmode_inprogress)
1244 return 0;
1245 RT_ASSERT((rtlphy->current_channel <= 14),
1246 ("WIRELESS_MODE_G but channel>14"));
1247 rtlphy->sw_chnl_inprogress = true;
1248 rtlphy->sw_chnl_stage = 0;
1249 rtlphy->sw_chnl_step = 0;
1250 if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1251 rtl92c_phy_sw_chnl_callback(hw);
1252 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1253 ("sw_chnl_inprogress false schdule workitem\n"));
1254 rtlphy->sw_chnl_inprogress = false;
1255 } else {
1256 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1257 ("sw_chnl_inprogress false driver sleep or"
1258 " unload\n"));
1259 rtlphy->sw_chnl_inprogress = false;
1260 }
1261 return 1;
1262}
1263
/* Execute one step of the channel-switch command sequence.
 *
 * The sequence has three stages: pre-common commands (stage 0),
 * RF-dependent commands (stage 1), and post-common commands (stage 2),
 * each terminated by CMDID_END.  *stage and *step persist in the caller
 * between invocations, so each call resumes where the last one left off.
 *
 * Returns true when the whole sequence has finished; otherwise returns
 * false and stores the executed command's delay (ms) in *delay. */
static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
					     u8 channel, u8 *stage, u8 *step,
					     u32 *delay)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
	u32 precommoncmdcnt;
	struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
	u32 postcommoncmdcnt;
	struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
	u32 rfdependcmdcnt;
	struct swchnlcmd *currentcmd = NULL;
	u8 rfpath;
	u8 num_total_rfpath = rtlphy->num_total_rfpath;

	/* Stage 0: set TX power for the new channel, then end. */
	precommoncmdcnt = 0;
	_rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
					 MAX_PRECMD_CNT,
					 CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
	_rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
					 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);

	/* Stage 2: no post-commands, just the terminator. */
	postcommoncmdcnt = 0;

	_rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
					 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);

	/* Stage 1: write the channel number into RF_CHNLBW (10 ms delay). */
	rfdependcmdcnt = 0;

	RT_ASSERT((channel >= 1 && channel <= 14),
		  ("illegal channel for Zebra: %d\n", channel));

	_rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
					 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
					 RF_CHNLBW, channel, 10);

	_rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
					 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
					 0);

	/* Execute exactly one non-END command per call. */
	do {
		switch (*stage) {
		case 0:
			currentcmd = &precommoncmd[*step];
			break;
		case 1:
			currentcmd = &rfdependcmd[*step];
			break;
		case 2:
			currentcmd = &postcommoncmd[*step];
			break;
		}

		if (currentcmd->cmdid == CMDID_END) {
			if ((*stage) == 2) {
				return true;	/* whole sequence finished */
			} else {
				/* Advance to the next stage and retry. */
				(*stage)++;
				(*step) = 0;
				continue;
			}
		}

		switch (currentcmd->cmdid) {
		case CMDID_SET_TXPOWEROWER_LEVEL:
			rtl92c_phy_set_txpower_level(hw, channel);
			break;
		case CMDID_WRITEPORT_ULONG:
			rtl_write_dword(rtlpriv, currentcmd->para1,
					currentcmd->para2);
			break;
		case CMDID_WRITEPORT_USHORT:
			rtl_write_word(rtlpriv, currentcmd->para1,
				       (u16) currentcmd->para2);
			break;
		case CMDID_WRITEPORT_UCHAR:
			rtl_write_byte(rtlpriv, currentcmd->para1,
				       (u8) currentcmd->para2);
			break;
		case CMDID_RF_WRITEREG:
			/* Keep the cached RF_CHNLBW value in sync: the
			 * low 10 bits carry the channel number. */
			for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
				rtlphy->rfreg_chnlval[rfpath] =
				    ((rtlphy->rfreg_chnlval[rfpath] &
				      0xfffffc00) | currentcmd->para2);

				rtl_set_rfreg(hw, (enum radio_path)rfpath,
					      currentcmd->para1,
					      RFREG_OFFSET_MASK,
					      rtlphy->rfreg_chnlval[rfpath]);
			}
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 ("switch case not process\n"));
			break;
		}

		break;
	} while (true);

	(*delay) = currentcmd->msdelay;
	(*step)++;
	return false;	/* more steps remain */
}
1369
1370static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
1371 u32 cmdtableidx, u32 cmdtablesz,
1372 enum swchnlcmd_id cmdid,
1373 u32 para1, u32 para2, u32 msdelay)
1374{
1375 struct swchnlcmd *pcmd;
1376
1377 if (cmdtable == NULL) {
1378 RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
1379 return false;
1380 }
1381
1382 if (cmdtableidx >= cmdtablesz)
1383 return false;
1384
1385 pcmd = cmdtable + cmdtableidx;
1386 pcmd->cmdid = cmdid;
1387 pcmd->para1 = para1;
1388 pcmd->para2 = para2;
1389 pcmd->msdelay = msdelay;
1390 return true;
1391}
1392
/* Stub: every RF path is reported as legal for this chip family.
 * Both parameters are intentionally ignored. */
bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
{
	return true;
}
1397
/* Run one IQ-calibration pass for RF path A (optionally configuring the
 * path-B registers too).  Returns a bitmap: bit 0 = TX IQK passed,
 * bit 1 = RX IQK passed.  NOTE(review): 0xe30..0xe5c register roles are
 * inferred from the IQK flow — confirm against the vendor reference. */
static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
	u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
	u8 result = 0x00;

	rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
	rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
	rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
	rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
		      config_pathb ? 0x28160202 : 0x28160502);

	if (config_pathb) {
		rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
		rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
		rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
		rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
	}

	/* Pulse 0xe48 (0xf9... then 0xf8...) to start the one-shot
	 * calibration, then wait for the hardware to finish. */
	rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);

	mdelay(IQK_DELAY_TIME);

	/* Read back the calibration result registers. */
	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
	reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
	reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);

	/* TX IQK passed: BIT(28) clear and readings differ from the
	 * fixed sentinel values 0x142/0x42. */
	if (!(reg_eac & BIT(28)) &&
	    (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	else
		return result;	/* TX failed: RX result is not evaluated */

	/* RX IQK passed: BIT(27) clear, readings off 0x132/0x36. */
	if (!(reg_eac & BIT(27)) &&
	    (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
	    (((reg_eac & 0x03FF0000) >> 16) != 0x36))
		result |= 0x02;
	return result;
}
1440
/* Run one IQ-calibration pass for RF path B.  Returns a bitmap:
 * bit 0 = TX IQK passed, bit 1 = RX IQK passed. */
static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
{
	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
	u8 result = 0x00;

	/* Pulse the path-B one-shot trigger (0xe60) and wait. */
	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
	mdelay(IQK_DELAY_TIME);
	/* Read back the path-B result registers. */
	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
	reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
	reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
	reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
	/* TX IQK passed: BIT(31) clear and readings differ from the
	 * fixed sentinel values 0x142/0x42. */
	if (!(reg_eac & BIT(31)) &&
	    (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	else
		return result;	/* TX failed: RX result is not evaluated */

	/* RX IQK passed: BIT(30) clear, readings off 0x132/0x36. */
	if (!(reg_eac & BIT(30)) &&
	    (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
	    (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
		result |= 0x02;
	return result;
}
1467
/* Apply the winning path-A IQK results (columns 0-3 of result[][8]) to
 * the TX/RX IQ-imbalance correction registers.  @final_candidate is the
 * index of the best run, or 0xFF when no run was usable; @btxonly skips
 * the RX-side programming. */
static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
					       bool b_iqk_ok, long result[][8],
					       u8 final_candidate, bool btxonly)
{
	u32 oldval_0, x, tx0_a, reg;
	long y, tx0_c;

	if (final_candidate == 0xFF)
		return;		/* no usable calibration run */
	else if (b_iqk_ok) {
		/* Current TX gain, bits 31:22 of the imbalance reg. */
		oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					  MASKDWORD) >> 22) & 0x3FF;
		x = result[final_candidate][0];
		/* Sign-extend the 10-bit two's-complement result. */
		if ((x & 0x00000200) != 0)
			x = x | 0xFFFFFC00;
		tx0_a = (x * oldval_0) >> 8;
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
			      ((x * oldval_0 >> 7) & 0x1));
		y = result[final_candidate][1];
		if ((y & 0x00000200) != 0)
			y = y | 0xFFFFFC00;
		tx0_c = (y * oldval_0) >> 8;
		/* The correction value is split: high 4 bits into
		 * XCTXAFE, low 6 bits into the imbalance register. */
		rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
			      ((tx0_c & 0x3C0) >> 6));
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
			      (tx0_c & 0x3F));
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
			      ((y * oldval_0 >> 7) & 0x1));
		if (btxonly)
			return;	/* caller asked for TX-side only */
		reg = result[final_candidate][2];
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
		reg = result[final_candidate][3] & 0x3F;
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
		reg = (result[final_candidate][3] >> 6) & 0xF;
		rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
	}
}
1507
/* Path-B counterpart of _rtl92c_phy_path_a_fill_iqk_matrix(): applies
 * columns 4-7 of result[][8] to the path-B correction registers. */
static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
					       bool b_iqk_ok, long result[][8],
					       u8 final_candidate, bool btxonly)
{
	u32 oldval_1, x, tx1_a, reg;
	long y, tx1_c;

	if (final_candidate == 0xFF)
		return;		/* no usable calibration run */
	else if (b_iqk_ok) {
		/* Current TX gain, bits 31:22 of the imbalance reg. */
		oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
					  MASKDWORD) >> 22) & 0x3FF;
		x = result[final_candidate][4];
		/* Sign-extend the 10-bit two's-complement result. */
		if ((x & 0x00000200) != 0)
			x = x | 0xFFFFFC00;
		tx1_a = (x * oldval_1) >> 8;
		rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
			      ((x * oldval_1 >> 7) & 0x1));
		y = result[final_candidate][5];
		if ((y & 0x00000200) != 0)
			y = y | 0xFFFFFC00;
		tx1_c = (y * oldval_1) >> 8;
		/* Split write: high 4 bits to XDTXAFE, low 6 bits to
		 * the imbalance register. */
		rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
			      ((tx1_c & 0x3C0) >> 6));
		rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
			      (tx1_c & 0x3F));
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
			      ((y * oldval_1 >> 7) & 0x1));
		if (btxonly)
			return;	/* caller asked for TX-side only */
		reg = result[final_candidate][6];
		rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
		reg = result[final_candidate][7] & 0x3F;
		rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
		reg = (result[final_candidate][7] >> 6) & 0xF;
		rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
	}
}
1547
1548static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
1549 u32 *addareg, u32 *addabackup,
1550 u32 registernum)
1551{
1552 u32 i;
1553
1554 for (i = 0; i < registernum; i++)
1555 addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
1556}
1557
/* Snapshot the IQK-related MAC registers: the first IQK_MAC_REG_NUM - 1
 * entries are byte-wide; the final entry (index i after the loop ends)
 * is read as a full dword. */
static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
					   u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i;

	for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
		macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
	/* i == IQK_MAC_REG_NUM - 1 here: the last register is 32-bit. */
	macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
}
1568
1569static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
1570 u32 *addareg, u32 *addabackup,
1571 u32 regiesternum)
1572{
1573 u32 i;
1574
1575 for (i = 0; i < regiesternum; i++)
1576 rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
1577}
1578
/* Mirror of _rtl92c_phy_save_mac_registers(): restore the first
 * IQK_MAC_REG_NUM - 1 MAC registers as bytes, then the final entry
 * (index i after the loop) as a full dword. */
static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
					     u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i;

	for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
		rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
	/* i == IQK_MAC_REG_NUM - 1 here: the last register is 32-bit. */
	rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
}
1589
1590static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
1591 u32 *addareg, bool is_patha_on, bool is2t)
1592{
1593 u32 pathOn;
1594 u32 i;
1595
1596 pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
1597 if (false == is2t) {
1598 pathOn = 0x0bdb25a0;
1599 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
1600 } else {
1601 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
1602 }
1603
1604 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
1605 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
1606}
1607
/* Quiesce the MAC for calibration: disable interrupt reporting via
 * macreg[0], clear BIT(3) in the middle byte-wide registers, and clear
 * BIT(5) in the final register (written after the loop while @i still
 * indexes the last slot).  Original values were saved in @macbackup. */
static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
						u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i;

	rtl_write_byte(rtlpriv, macreg[0], 0x3F);

	for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
		rtl_write_byte(rtlpriv, macreg[i],
			       (u8) (macbackup[i] & (~BIT(3))));
	/* i == IQK_MAC_REG_NUM - 1: last register clears BIT(5) instead. */
	rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
}
1621
/* Put RF path A into standby while path B is being calibrated:
 * drop out of IQK mode (0xe28 = 0), park path A (0x840), then
 * re-enter IQK mode (0xe28 = 0x80800000).  Order matters. */
static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
{
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
	rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
}
1628
1629static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
1630{
1631 u32 mode;
1632
1633 mode = pi_mode ? 0x01000100 : 0x01000000;
1634 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1635 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1636}
1637
/* Compare two IQK result rows (@c1 vs @c2) element-wise.  Elements whose
 * difference exceeds MAX_TOLERANCE set a bit in @simularity_bitmap; the
 * RX slots (index 2 and 6) may instead nominate a fallback candidate when
 * one row is all-zero.  Returns true when the rows agree everywhere; when
 * only one half (TX or RX) agrees, the agreeing half of @c1 is copied
 * into the merged row result[3] and false is returned. */
static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
					   long result[][8], u8 c1, u8 c2)
{
	u32 i, j, diff, simularity_bitmap, bound;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	u8 final_candidate[2] = { 0xFF, 0xFF };
	bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);

	/* 2T2R parts have 8 result slots (paths A+B), 1T1R only 4. */
	if (is2t)
		bound = 8;
	else
		bound = 4;

	simularity_bitmap = 0;

	for (i = 0; i < bound; i++) {
		/* Absolute difference, computed without abs() on longs. */
		diff = (result[c1][i] > result[c2][i]) ?
		    (result[c1][i] - result[c2][i]) :
		    (result[c2][i] - result[c1][i]);

		if (diff > MAX_TOLERANCE) {
			/* RX slots: an all-zero pair marks a failed RX IQK,
			 * so prefer the row that has real data. */
			if ((i == 2 || i == 6) && !simularity_bitmap) {
				if (result[c1][i] + result[c1][i + 1] == 0)
					final_candidate[(i / 4)] = c2;
				else if (result[c2][i] + result[c2][i + 1] == 0)
					final_candidate[(i / 4)] = c1;
				else
					simularity_bitmap = simularity_bitmap |
					    (1 << i);
			} else
				simularity_bitmap =
				    simularity_bitmap | (1 << i);
		}
	}

	if (simularity_bitmap == 0) {
		/* Rows agree; fold any nominated candidate's TX values
		 * into the merged row and report disagreement (false)
		 * so the caller keeps searching. */
		for (i = 0; i < (bound / 4); i++) {
			if (final_candidate[i] != 0xFF) {
				for (j = i * 4; j < (i + 1) * 4 - 2; j++)
					result[3][j] =
					    result[final_candidate[i]][j];
				bresult = false;
			}
		}
		return bresult;
	} else if (!(simularity_bitmap & 0x0F)) {
		/* Path A (slots 0-3) agrees: keep c1's path A values. */
		for (i = 0; i < 4; i++)
			result[3][i] = result[c1][i];
		return false;
	} else if (!(simularity_bitmap & 0xF0) && is2t) {
		/* Path B (slots 4-7) agrees: keep c1's path B values. */
		for (i = 4; i < 8; i++)
			result[3][i] = result[c1][i];
		return false;
	} else {
		return false;
	}

}
1697
1698static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
1699 long result[][8], u8 t, bool is2t)
1700{
1701 struct rtl_priv *rtlpriv = rtl_priv(hw);
1702 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1703 u32 i;
1704 u8 patha_ok, pathb_ok;
1705 u32 adda_reg[IQK_ADDA_REG_NUM] = {
1706 0x85c, 0xe6c, 0xe70, 0xe74,
1707 0xe78, 0xe7c, 0xe80, 0xe84,
1708 0xe88, 0xe8c, 0xed0, 0xed4,
1709 0xed8, 0xedc, 0xee0, 0xeec
1710 };
1711
1712 u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
1713 0x522, 0x550, 0x551, 0x040
1714 };
1715
1716 const u32 retrycount = 2;
1717
1718 u32 bbvalue;
1719
1720 if (t == 0) {
1721 bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
1722
1723 _rtl92c_phy_save_adda_registers(hw, adda_reg,
1724 rtlphy->adda_backup, 16);
1725 _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
1726 rtlphy->iqk_mac_backup);
1727 }
1728 _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
1729 if (t == 0) {
1730 rtlphy->b_rfpi_enable = (u8) rtl_get_bbreg(hw,
1731 RFPGA0_XA_HSSIPARAMETER1,
1732 BIT(8));
1733 }
1734 if (!rtlphy->b_rfpi_enable)
1735 _rtl92c_phy_pi_mode_switch(hw, true);
1736 if (t == 0) {
1737 rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
1738 rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
1739 rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
1740 }
1741 rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
1742 rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
1743 rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
1744 if (is2t) {
1745 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1746 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
1747 }
1748 _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
1749 rtlphy->iqk_mac_backup);
1750 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
1751 if (is2t)
1752 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
1753 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1754 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
1755 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
1756 for (i = 0; i < retrycount; i++) {
1757 patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
1758 if (patha_ok == 0x03) {
1759 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1760 0x3FF0000) >> 16;
1761 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1762 0x3FF0000) >> 16;
1763 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
1764 0x3FF0000) >> 16;
1765 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
1766 0x3FF0000) >> 16;
1767 break;
1768 } else if (i == (retrycount - 1) && patha_ok == 0x01)
1769 result[t][0] = (rtl_get_bbreg(hw, 0xe94,
1770 MASKDWORD) & 0x3FF0000) >>
1771 16;
1772 result[t][1] =
1773 (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
1774
1775 }
1776
1777 if (is2t) {
1778 _rtl92c_phy_path_a_standby(hw);
1779 _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
1780 for (i = 0; i < retrycount; i++) {
1781 pathb_ok = _rtl92c_phy_path_b_iqk(hw);
1782 if (pathb_ok == 0x03) {
1783 result[t][4] = (rtl_get_bbreg(hw,
1784 0xeb4,
1785 MASKDWORD) &
1786 0x3FF0000) >> 16;
1787 result[t][5] =
1788 (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1789 0x3FF0000) >> 16;
1790 result[t][6] =
1791 (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
1792 0x3FF0000) >> 16;
1793 result[t][7] =
1794 (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
1795 0x3FF0000) >> 16;
1796 break;
1797 } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
1798 result[t][4] = (rtl_get_bbreg(hw,
1799 0xeb4,
1800 MASKDWORD) &
1801 0x3FF0000) >> 16;
1802 }
1803 result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1804 0x3FF0000) >> 16;
1805 }
1806 }
1807 rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
1808 rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
1809 rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
1810 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
1811 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
1812 if (is2t)
1813 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
1814 if (t != 0) {
1815 if (!rtlphy->b_rfpi_enable)
1816 _rtl92c_phy_pi_mode_switch(hw, false);
1817 _rtl92c_phy_reload_adda_registers(hw, adda_reg,
1818 rtlphy->adda_backup, 16);
1819 _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
1820 rtlphy->iqk_mac_backup);
1821 }
1822}
1823
1824static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
1825{ 436{
1826 u8 tmpreg; 437 u8 tmpreg;
1827 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; 438 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
@@ -1866,666 +477,6 @@ static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
1866 } 477 }
1867} 478}
1868 479
1869static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1870 char delta, bool is2t)
1871{
1872 /* This routine is deliberately dummied out for later fixes */
1873#if 0
1874 struct rtl_priv *rtlpriv = rtl_priv(hw);
1875 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1876 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1877
1878 u32 reg_d[PATH_NUM];
1879 u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
1880
1881 u32 bb_backup[APK_BB_REG_NUM];
1882 u32 bb_reg[APK_BB_REG_NUM] = {
1883 0x904, 0xc04, 0x800, 0xc08, 0x874
1884 };
1885 u32 bb_ap_mode[APK_BB_REG_NUM] = {
1886 0x00000020, 0x00a05430, 0x02040000,
1887 0x000800e4, 0x00204000
1888 };
1889 u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
1890 0x00000020, 0x00a05430, 0x02040000,
1891 0x000800e4, 0x22204000
1892 };
1893
1894 u32 afe_backup[APK_AFE_REG_NUM];
1895 u32 afe_reg[APK_AFE_REG_NUM] = {
1896 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
1897 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
1898 0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
1899 0xeec
1900 };
1901
1902 u32 mac_backup[IQK_MAC_REG_NUM];
1903 u32 mac_reg[IQK_MAC_REG_NUM] = {
1904 0x522, 0x550, 0x551, 0x040
1905 };
1906
1907 u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1908 {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
1909 {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
1910 };
1911
1912 u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1913 {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
1914 {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
1915 };
1916
1917 u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1918 {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
1919 {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
1920 };
1921
1922 u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1923 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
1924 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
1925 };
1926
1927 u32 afe_on_off[PATH_NUM] = {
1928 0x04db25a4, 0x0b1b25a4
1929 };
1930
1931 u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
1932
1933 u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
1934
1935 u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
1936
1937 u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
1938
1939 const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
1940 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1941 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1942 {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1943 {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1944 {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
1945 };
1946
1947 const u32 apk_normal_setting_value_1[13] = {
1948 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
1949 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
1950 0x12680000, 0x00880000, 0x00880000
1951 };
1952
1953 const u32 apk_normal_setting_value_2[16] = {
1954 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
1955 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
1956 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
1957 0x00050006
1958 };
1959
1960 const u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
1961
1962 long bb_offset, delta_v, delta_offset;
1963
1964 if (!is2t)
1965 pathbound = 1;
1966
1967 for (index = 0; index < PATH_NUM; index++) {
1968 apk_offset[index] = apk_normal_offset[index];
1969 apk_value[index] = apk_normal_value[index];
1970 afe_on_off[index] = 0x6fdb25a4;
1971 }
1972
1973 for (index = 0; index < APK_BB_REG_NUM; index++) {
1974 for (path = 0; path < pathbound; path++) {
1975 apk_rf_init_value[path][index] =
1976 apk_normal_rf_init_value[path][index];
1977 apk_rf_value_0[path][index] =
1978 apk_normal_rf_value_0[path][index];
1979 }
1980 bb_ap_mode[index] = bb_normal_ap_mode[index];
1981
1982 apkbound = 6;
1983 }
1984
1985 for (index = 0; index < APK_BB_REG_NUM; index++) {
1986 if (index == 0)
1987 continue;
1988 bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
1989 }
1990
1991 _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
1992
1993 _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
1994
1995 for (path = 0; path < pathbound; path++) {
1996 if (path == RF90_PATH_A) {
1997 offset = 0xb00;
1998 for (index = 0; index < 11; index++) {
1999 rtl_set_bbreg(hw, offset, MASKDWORD,
2000 apk_normal_setting_value_1
2001 [index]);
2002
2003 offset += 0x04;
2004 }
2005
2006 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
2007
2008 offset = 0xb68;
2009 for (; index < 13; index++) {
2010 rtl_set_bbreg(hw, offset, MASKDWORD,
2011 apk_normal_setting_value_1
2012 [index]);
2013
2014 offset += 0x04;
2015 }
2016
2017 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
2018
2019 offset = 0xb00;
2020 for (index = 0; index < 16; index++) {
2021 rtl_set_bbreg(hw, offset, MASKDWORD,
2022 apk_normal_setting_value_2
2023 [index]);
2024
2025 offset += 0x04;
2026 }
2027 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2028 } else if (path == RF90_PATH_B) {
2029 offset = 0xb70;
2030 for (index = 0; index < 10; index++) {
2031 rtl_set_bbreg(hw, offset, MASKDWORD,
2032 apk_normal_setting_value_1
2033 [index]);
2034
2035 offset += 0x04;
2036 }
2037 rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
2038 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
2039
2040 offset = 0xb68;
2041 index = 11;
2042 for (; index < 13; index++) {
2043 rtl_set_bbreg(hw, offset, MASKDWORD,
2044 apk_normal_setting_value_1
2045 [index]);
2046
2047 offset += 0x04;
2048 }
2049
2050 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
2051
2052 offset = 0xb60;
2053 for (index = 0; index < 16; index++) {
2054 rtl_set_bbreg(hw, offset, MASKDWORD,
2055 apk_normal_setting_value_2
2056 [index]);
2057
2058 offset += 0x04;
2059 }
2060 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2061 }
2062
2063 reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
2064 0xd, MASKDWORD);
2065
2066 for (index = 0; index < APK_AFE_REG_NUM; index++)
2067 rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
2068 afe_on_off[path]);
2069
2070 if (path == RF90_PATH_A) {
2071 for (index = 0; index < APK_BB_REG_NUM; index++) {
2072 if (index == 0)
2073 continue;
2074 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
2075 bb_ap_mode[index]);
2076 }
2077 }
2078
2079 _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
2080
2081 if (path == 0) {
2082 rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
2083 } else {
2084 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
2085 0x10000);
2086 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
2087 0x1000f);
2088 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
2089 0x20103);
2090 }
2091
2092 delta_offset = ((delta + 14) / 2);
2093 if (delta_offset < 0)
2094 delta_offset = 0;
2095 else if (delta_offset > 12)
2096 delta_offset = 12;
2097
2098 for (index = 0; index < APK_BB_REG_NUM; index++) {
2099 if (index != 1)
2100 continue;
2101
2102 tmpreg = apk_rf_init_value[path][index];
2103
2104 if (!rtlefuse->b_apk_thermalmeterignore) {
2105 bb_offset = (tmpreg & 0xF0000) >> 16;
2106
2107 if (!(tmpreg & BIT(15)))
2108 bb_offset = -bb_offset;
2109
2110 delta_v =
2111 apk_delta_mapping[index][delta_offset];
2112
2113 bb_offset += delta_v;
2114
2115 if (bb_offset < 0) {
2116 tmpreg = tmpreg & (~BIT(15));
2117 bb_offset = -bb_offset;
2118 } else {
2119 tmpreg = tmpreg | BIT(15);
2120 }
2121
2122 tmpreg =
2123 (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
2124 }
2125
2126 rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
2127 MASKDWORD, 0x8992e);
2128 rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
2129 MASKDWORD, apk_rf_value_0[path][index]);
2130 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
2131 MASKDWORD, tmpreg);
2132
2133 i = 0;
2134 do {
2135 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
2136 rtl_set_bbreg(hw, apk_offset[path],
2137 MASKDWORD, apk_value[0]);
2138 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2139 ("PHY_APCalibrate() offset 0x%x "
2140 "value 0x%x\n",
2141 apk_offset[path],
2142 rtl_get_bbreg(hw, apk_offset[path],
2143 MASKDWORD)));
2144
2145 mdelay(3);
2146
2147 rtl_set_bbreg(hw, apk_offset[path],
2148 MASKDWORD, apk_value[1]);
2149 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2150 ("PHY_APCalibrate() offset 0x%x "
2151 "value 0x%x\n",
2152 apk_offset[path],
2153 rtl_get_bbreg(hw, apk_offset[path],
2154 MASKDWORD)));
2155
2156 mdelay(20);
2157
2158 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2159
2160 if (path == RF90_PATH_A)
2161 tmpreg = rtl_get_bbreg(hw, 0xbd8,
2162 0x03E00000);
2163 else
2164 tmpreg = rtl_get_bbreg(hw, 0xbd8,
2165 0xF8000000);
2166
2167 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2168 ("PHY_APCalibrate() offset "
2169 "0xbd8[25:21] %x\n", tmpreg));
2170
2171 i++;
2172
2173 } while (tmpreg > apkbound && i < 4);
2174
2175 apk_result[path][index] = tmpreg;
2176 }
2177 }
2178
2179 _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
2180
2181 for (index = 0; index < APK_BB_REG_NUM; index++) {
2182 if (index == 0)
2183 continue;
2184 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
2185 }
2186
2187 _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
2188
2189 for (path = 0; path < pathbound; path++) {
2190 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
2191 MASKDWORD, reg_d[path]);
2192
2193 if (path == RF90_PATH_B) {
2194 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
2195 0x1000f);
2196 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
2197 0x20101);
2198 }
2199
2200 if (apk_result[path][1] > 6)
2201 apk_result[path][1] = 6;
2202 }
2203
2204 for (path = 0; path < pathbound; path++) {
2205 rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
2206 ((apk_result[path][1] << 15) |
2207 (apk_result[path][1] << 10) |
2208 (apk_result[path][1] << 5) |
2209 apk_result[path][1]));
2210
2211 if (path == RF90_PATH_A)
2212 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
2213 ((apk_result[path][1] << 15) |
2214 (apk_result[path][1] << 10) |
2215 (0x00 << 5) | 0x05));
2216 else
2217 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
2218 ((apk_result[path][1] << 15) |
2219 (apk_result[path][1] << 10) |
2220 (0x02 << 5) | 0x05));
2221
2222 rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
2223 ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
2224 0x08));
2225
2226 }
2227
2228 rtlphy->b_apk_done = true;
2229#endif
2230}
2231
2232static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
2233 bool bmain, bool is2t)
2234{
2235 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2236
2237 if (is_hal_stop(rtlhal)) {
2238 rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
2239 rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
2240 }
2241 if (is2t) {
2242 if (bmain)
2243 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
2244 BIT(5) | BIT(6), 0x1);
2245 else
2246 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
2247 BIT(5) | BIT(6), 0x2);
2248 } else {
2249 if (bmain)
2250 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
2251 else
2252 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
2253
2254 }
2255}
2256
2257#undef IQK_ADDA_REG_NUM
2258#undef IQK_DELAY_TIME
2259
/* Top-level IQ calibration entry point.  Runs up to three calibration
 * passes, picks the pass whose results agree with another (or the merged
 * row 3 as fallback), programs the IQK matrices, and saves the affected
 * BB registers.  With @b_recovery set, only restores the saved BB state. */
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	long result[4][8];
	u8 i, final_candidate;
	bool b_patha_ok, b_pathb_ok;
	long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
	    reg_ecc, reg_tmp = 0;
	bool is12simular, is13simular, is23simular;
	/* Always false here; placeholders for continuous-TX/single-tone
	 * test modes that would skip calibration. */
	bool b_start_conttx = false, b_singletone = false;
	u32 iqk_bb_reg[10] = {
		ROFDM0_XARXIQIMBALANCE,
		ROFDM0_XBRXIQIMBALANCE,
		ROFDM0_ECCATHRESHOLD,
		ROFDM0_AGCRSSITABLE,
		ROFDM0_XATXIQIMBALANCE,
		ROFDM0_XBTXIQIMBALANCE,
		ROFDM0_XCTXIQIMBALANCE,
		ROFDM0_XCTXAFE,
		ROFDM0_XDTXAFE,
		ROFDM0_RXIQEXTANTA
	};

	if (b_recovery) {
		/* Recovery mode: restore saved BB registers and bail out. */
		_rtl92c_phy_reload_adda_registers(hw,
						  iqk_bb_reg,
						  rtlphy->iqk_bb_backup, 10);
		return;
	}
	if (b_start_conttx || b_singletone)
		return;
	for (i = 0; i < 8; i++) {
		result[0][i] = 0;
		result[1][i] = 0;
		result[2][i] = 0;
		result[3][i] = 0;
	}
	final_candidate = 0xff;
	b_patha_ok = false;
	b_pathb_ok = false;
	is12simular = false;
	is23simular = false;
	is13simular = false;
	/* Up to 3 passes; stop early once two passes agree. */
	for (i = 0; i < 3; i++) {
		if (IS_92C_SERIAL(rtlhal->version))
			_rtl92c_phy_iq_calibrate(hw, result, i, true);
		else
			_rtl92c_phy_iq_calibrate(hw, result, i, false);
		if (i == 1) {
			is12simular = _rtl92c_phy_simularity_compare(hw,
								     result, 0,
								     1);
			if (is12simular) {
				final_candidate = 0;
				break;
			}
		}
		if (i == 2) {
			is13simular = _rtl92c_phy_simularity_compare(hw,
								     result, 0,
								     2);
			if (is13simular) {
				final_candidate = 0;
				break;
			}
			is23simular = _rtl92c_phy_simularity_compare(hw,
								     result, 1,
								     2);
			if (is23simular)
				final_candidate = 1;
			else {
				/* NOTE: reuses outer loop variable i; this
				 * only happens on the last iteration, so the
				 * outer loop terminates either way. */
				for (i = 0; i < 8; i++)
					reg_tmp += result[3][i];

				/* Use the merged row 3 only if it holds data. */
				if (reg_tmp != 0)
					final_candidate = 3;
				else
					final_candidate = 0xFF;
			}
		}
	}
	/* Only the last row's values survive this loop; reg_* hold row 3
	 * as a default before final_candidate (if any) overrides them. */
	for (i = 0; i < 4; i++) {
		reg_e94 = result[i][0];
		reg_e9c = result[i][1];
		reg_ea4 = result[i][2];
		reg_eac = result[i][3];
		reg_eb4 = result[i][4];
		reg_ebc = result[i][5];
		reg_ec4 = result[i][6];
		reg_ecc = result[i][7];
	}
	if (final_candidate != 0xff) {
		rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
		rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
		reg_ea4 = result[final_candidate][2];
		reg_eac = result[final_candidate][3];
		rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
		rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
		reg_ec4 = result[final_candidate][6];
		reg_ecc = result[final_candidate][7];
		b_patha_ok = b_pathb_ok = true;
	} else {
		/* No usable candidate: fall back to unity IQK defaults. */
		rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
		rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
	}
	if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
		_rtl92c_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result,
						   final_candidate,
						   (reg_ea4 == 0));
	if (IS_92C_SERIAL(rtlhal->version)) {
		if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
			_rtl92c_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok,
							   result,
							   final_candidate,
							   (reg_ec4 == 0));
	}
	/* Save the programmed BB registers for later b_recovery calls. */
	_rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
					rtlphy->iqk_bb_backup, 10);
}
2382
2383void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
2384{
2385 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2386 bool b_start_conttx = false, b_singletone = false;
2387
2388 if (b_start_conttx || b_singletone)
2389 return;
2390 if (IS_92C_SERIAL(rtlhal->version))
2391 _rtl92c_phy_lc_calibrate(hw, true);
2392 else
2393 _rtl92c_phy_lc_calibrate(hw, false);
2394}
2395
2396void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
2397{
2398 struct rtl_priv *rtlpriv = rtl_priv(hw);
2399 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2400 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2401
2402 if (rtlphy->b_apk_done)
2403 return;
2404 if (IS_92C_SERIAL(rtlhal->version))
2405 _rtl92c_phy_ap_calibrate(hw, delta, true);
2406 else
2407 _rtl92c_phy_ap_calibrate(hw, delta, false);
2408}
2409
2410void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
2411{
2412 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2413
2414 if (IS_92C_SERIAL(rtlhal->version))
2415 _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
2416 else
2417 _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
2418}
2419
2420bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
2421{
2422 struct rtl_priv *rtlpriv = rtl_priv(hw);
2423 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2424 bool b_postprocessing = false;
2425
2426 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2427 ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
2428 iotype, rtlphy->set_io_inprogress));
2429 do {
2430 switch (iotype) {
2431 case IO_CMD_RESUME_DM_BY_SCAN:
2432 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2433 ("[IO CMD] Resume DM after scan.\n"));
2434 b_postprocessing = true;
2435 break;
2436 case IO_CMD_PAUSE_DM_BY_SCAN:
2437 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2438 ("[IO CMD] Pause DM before scan.\n"));
2439 b_postprocessing = true;
2440 break;
2441 default:
2442 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2443 ("switch case not process\n"));
2444 break;
2445 }
2446 } while (false);
2447 if (b_postprocessing && !rtlphy->set_io_inprogress) {
2448 rtlphy->set_io_inprogress = true;
2449 rtlphy->current_io_type = iotype;
2450 } else {
2451 return false;
2452 }
2453 rtl92c_phy_set_io(hw);
2454 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
2455 return true;
2456}
2457
/* Execute the IO command stored in rtlphy->current_io_type.
 * Pause saves the current initial-gain value into initgain_backup and
 * forces a fixed gain (0x17) for scanning; resume restores the saved
 * gain and reapplies the TX power level.  Clears set_io_inprogress
 * when done. */
void rtl92c_phy_set_io(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
		 ("--->Cmd(%#x), set_io_inprogress(%d)\n",
		  rtlphy->current_io_type, rtlphy->set_io_inprogress));
	switch (rtlphy->current_io_type) {
	case IO_CMD_RESUME_DM_BY_SCAN:
		/* Restore the gain saved when the scan started. */
		dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
		rtl92c_dm_write_dig(hw);
		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
		break;
	case IO_CMD_PAUSE_DM_BY_SCAN:
		rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
		/* Fixed gain 0x17 while scanning. */
		dm_digtable.cur_igvalue = 0x17;
		rtl92c_dm_write_dig(hw);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("switch case not process\n"));
		break;
	}
	rtlphy->set_io_inprogress = false;
	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
		 ("<---(%#x)\n", rtlphy->current_io_type));
}
2486
/* Power the RF front end back on after sleep: restore the SPS regulator,
 * pulse the BB/RF function-enable bits (0xE3 -> 0xE2 -> 0xE3 acts as a
 * reset), clear APSD control, and unpause TX.  Write order matters. */
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
2498
/* Put the RF front end to sleep: pause TX, power down RF path A, and
 * request APSD standby, polling (up to 5 retries) until the RF reports
 * powered-down (register 0 reads zero).  On timeout, revert to the
 * powered-on state rather than leaving the chip half-asleep. */
static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
{
	u32 u4b_tmp;
	u8 delay = 5;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
	rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
	u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
	while (u4b_tmp != 0 && delay > 0) {
		/* Retry the power-down sequence until the RF confirms. */
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
		u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
		delay--;
	}
	if (delay == 0) {
		/* Timed out: bring the RF back up and unpause TX. */
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 ("Switch RF timeout !!!.\n"));
		return;
	}
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
}
2528
2529static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, 480static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
2530 enum rf_pwrstate rfpwr_state) 481 enum rf_pwrstate rfpwr_state)
2531{ 482{
@@ -2648,7 +599,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
2648 jiffies_to_msecs(jiffies - 599 jiffies_to_msecs(jiffies -
2649 ppsc->last_awake_jiffies))); 600 ppsc->last_awake_jiffies)));
2650 ppsc->last_sleep_jiffies = jiffies; 601 ppsc->last_sleep_jiffies = jiffies;
2651 _rtl92ce_phy_set_rf_sleep(hw); 602 _rtl92c_phy_set_rf_sleep(hw);
2652 break; 603 break;
2653 } 604 }
2654 default: 605 default:
@@ -2663,7 +614,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
2663 return bresult; 614 return bresult;
2664} 615}
2665 616
2666bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, 617bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
2667 enum rf_pwrstate rfpwr_state) 618 enum rf_pwrstate rfpwr_state)
2668{ 619{
2669 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 620 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index ca4daee6e9a8..a37267e3fc22 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -57,8 +57,6 @@
57#define IQK_MAC_REG_NUM 4 57#define IQK_MAC_REG_NUM 4
58 58
59#define RF90_PATH_MAX 2 59#define RF90_PATH_MAX 2
60#define CHANNEL_MAX_NUMBER 14
61#define CHANNEL_GROUP_MAX 3
62 60
63#define CT_OFFSET_MAC_ADDR 0X16 61#define CT_OFFSET_MAC_ADDR 0X16
64 62
@@ -78,9 +76,7 @@
78#define CT_OFFSET_CUSTOMER_ID 0x7F 76#define CT_OFFSET_CUSTOMER_ID 0x7F
79 77
80#define RTL92C_MAX_PATH_NUM 2 78#define RTL92C_MAX_PATH_NUM 2
81#define CHANNEL_MAX_NUMBER 14 79#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
82#define CHANNEL_GROUP_MAX 3
83
84enum swchnlcmd_id { 80enum swchnlcmd_id {
85 CMDID_END, 81 CMDID_END,
86 CMDID_SET_TXPOWEROWER_LEVEL, 82 CMDID_SET_TXPOWEROWER_LEVEL,
@@ -195,11 +191,11 @@ extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
195extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, 191extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
196 enum radio_path rfpath, u32 regaddr, 192 enum radio_path rfpath, u32 regaddr,
197 u32 bitmask); 193 u32 bitmask);
198extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, 194extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
199 enum radio_path rfpath, u32 regaddr, 195 enum radio_path rfpath, u32 regaddr,
200 u32 bitmask, u32 data); 196 u32 bitmask, u32 data);
201extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw); 197extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
202extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw); 198bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
203extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw); 199extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
204extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, 200extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
205 enum radio_path rfpath); 201 enum radio_path rfpath);
@@ -227,11 +223,32 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
227extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, 223extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
228 u32 rfpath); 224 u32 rfpath);
229bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 225bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
230extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, 226bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
231 enum rf_pwrstate rfpwr_state); 227 enum rf_pwrstate rfpwr_state);
232void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw);
233void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw); 228void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
234bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 229bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
235void rtl92c_phy_set_io(struct ieee80211_hw *hw); 230void rtl92c_phy_set_io(struct ieee80211_hw *hw);
231void rtl92c_bb_block_on(struct ieee80211_hw *hw);
232u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
233 enum radio_path rfpath, u32 offset);
234u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
235 enum radio_path rfpath, u32 offset);
236u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
237void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
238 enum radio_path rfpath, u32 offset,
239 u32 data);
240void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
241 u32 regaddr, u32 bitmask,
242 u32 data);
243void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
244 enum radio_path rfpath, u32 offset,
245 u32 data);
246void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
247 u32 regaddr, u32 bitmask,
248 u32 data);
249bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
250void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
251bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
252void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw);
236 253
237#endif 254#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index 875d51465225..b0868a613841 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -63,7 +63,15 @@
63#define REG_LEDCFG3 0x004F 63#define REG_LEDCFG3 0x004F
64#define REG_FSIMR 0x0050 64#define REG_FSIMR 0x0050
65#define REG_FSISR 0x0054 65#define REG_FSISR 0x0054
66 66#define REG_HSIMR 0x0058
67#define REG_HSISR 0x005c
68
69/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
70#define REG_GPIO_PIN_CTRL_2 0x0060
71/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
72#define REG_GPIO_IO_SEL_2 0x0062
73/* RTL8723 WIFI/BT/GPS Multi-Function control source. */
74#define REG_MULTI_FUNC_CTRL 0x0068
67#define REG_MCUFWDL 0x0080 75#define REG_MCUFWDL 0x0080
68 76
69#define REG_HMEBOX_EXT_0 0x0088 77#define REG_HMEBOX_EXT_0 0x0088
@@ -79,6 +87,7 @@
79#define REG_PCIE_MIO_INTD 0x00E8 87#define REG_PCIE_MIO_INTD 0x00E8
80#define REG_HPON_FSM 0x00EC 88#define REG_HPON_FSM 0x00EC
81#define REG_SYS_CFG 0x00F0 89#define REG_SYS_CFG 0x00F0
90#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only.*/
82 91
83#define REG_CR 0x0100 92#define REG_CR 0x0100
84#define REG_PBP 0x0104 93#define REG_PBP 0x0104
@@ -209,6 +218,8 @@
209#define REG_RDG_PIFS 0x0513 218#define REG_RDG_PIFS 0x0513
210#define REG_SIFS_CTX 0x0514 219#define REG_SIFS_CTX 0x0514
211#define REG_SIFS_TRX 0x0516 220#define REG_SIFS_TRX 0x0516
221#define REG_SIFS_CCK 0x0514
222#define REG_SIFS_OFDM 0x0516
212#define REG_AGGR_BREAK_TIME 0x051A 223#define REG_AGGR_BREAK_TIME 0x051A
213#define REG_SLOT 0x051B 224#define REG_SLOT 0x051B
214#define REG_TX_PTCL_CTRL 0x0520 225#define REG_TX_PTCL_CTRL 0x0520
@@ -261,6 +272,10 @@
261#define REG_MAC_SPEC_SIFS 0x063A 272#define REG_MAC_SPEC_SIFS 0x063A
262#define REG_RESP_SIFS_CCK 0x063C 273#define REG_RESP_SIFS_CCK 0x063C
263#define REG_RESP_SIFS_OFDM 0x063E 274#define REG_RESP_SIFS_OFDM 0x063E
275/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
276#define REG_R2T_SIFS 0x063C
277/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
278#define REG_T2T_SIFS 0x063E
264#define REG_ACKTO 0x0640 279#define REG_ACKTO 0x0640
265#define REG_CTS2TO 0x0641 280#define REG_CTS2TO 0x0641
266#define REG_EIFS 0x0642 281#define REG_EIFS 0x0642
@@ -641,9 +656,10 @@
641#define STOPBE BIT(1) 656#define STOPBE BIT(1)
642#define STOPBK BIT(0) 657#define STOPBK BIT(0)
643 658
644#define RCR_APPFCS BIT(31) 659#define RCR_APP_FCS BIT(31)
645#define RCR_APP_MIC BIT(30) 660#define RCR_APP_MIC BIT(30)
646#define RCR_APP_ICV BIT(29) 661#define RCR_APP_ICV BIT(29)
662#define RCR_APP_PHYSTS BIT(28)
647#define RCR_APP_PHYST_RXFF BIT(28) 663#define RCR_APP_PHYST_RXFF BIT(28)
648#define RCR_APP_BA_SSN BIT(27) 664#define RCR_APP_BA_SSN BIT(27)
649#define RCR_ENMBID BIT(24) 665#define RCR_ENMBID BIT(24)
@@ -759,6 +775,7 @@
759 775
760#define BOOT_FROM_EEPROM BIT(4) 776#define BOOT_FROM_EEPROM BIT(4)
761#define EEPROM_EN BIT(5) 777#define EEPROM_EN BIT(5)
778#define EEPROMSEL BOOT_FROM_EEPROM
762 779
763#define AFE_BGEN BIT(0) 780#define AFE_BGEN BIT(0)
764#define AFE_MBEN BIT(1) 781#define AFE_MBEN BIT(1)
@@ -876,6 +893,8 @@
876#define BD_MAC2 BIT(9) 893#define BD_MAC2 BIT(9)
877#define BD_MAC1 BIT(10) 894#define BD_MAC1 BIT(10)
878#define IC_MACPHY_MODE BIT(11) 895#define IC_MACPHY_MODE BIT(11)
896#define BT_FUNC BIT(16)
897#define VENDOR_ID BIT(19)
879#define PAD_HWPD_IDN BIT(22) 898#define PAD_HWPD_IDN BIT(22)
880#define TRP_VAUX_EN BIT(23) 899#define TRP_VAUX_EN BIT(23)
881#define TRP_BT_EN BIT(24) 900#define TRP_BT_EN BIT(24)
@@ -883,6 +902,28 @@
883#define BD_HCI_SEL BIT(26) 902#define BD_HCI_SEL BIT(26)
884#define TYPE_ID BIT(27) 903#define TYPE_ID BIT(27)
885 904
905/* REG_GPIO_OUTSTS (For RTL8723 only) */
906#define EFS_HCI_SEL (BIT(0)|BIT(1))
907#define PAD_HCI_SEL (BIT(2)|BIT(3))
908#define HCI_SEL (BIT(4)|BIT(5))
909#define PKG_SEL_HCI BIT(6)
910#define FEN_GPS BIT(7)
911#define FEN_BT BIT(8)
912#define FEN_WL BIT(9)
913#define FEN_PCI BIT(10)
914#define FEN_USB BIT(11)
915#define BTRF_HWPDN_N BIT(12)
916#define WLRF_HWPDN_N BIT(13)
917#define PDN_BT_N BIT(14)
918#define PDN_GPS_N BIT(15)
919#define BT_CTL_HWPDN BIT(16)
920#define GPS_CTL_HWPDN BIT(17)
921#define PPHY_SUSB BIT(20)
922#define UPHY_SUSB BIT(21)
923#define PCI_SUSEN BIT(22)
924#define USB_SUSEN BIT(23)
925#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
926
886#define CHIP_VER_RTL_MASK 0xF000 927#define CHIP_VER_RTL_MASK 0xF000
887#define CHIP_VER_RTL_SHIFT 12 928#define CHIP_VER_RTL_SHIFT 12
888 929
@@ -1035,7 +1076,7 @@
1035#define _RARF_RC7(x) (((x) & 0x1F) << 16) 1076#define _RARF_RC7(x) (((x) & 0x1F) << 16)
1036#define _RARF_RC8(x) (((x) & 0x1F) << 24) 1077#define _RARF_RC8(x) (((x) & 0x1F) << 24)
1037 1078
1038#define AC_PARAM_TXOP_LIMIT_OFFSET 16 1079#define AC_PARAM_TXOP_OFFSET 16
1039#define AC_PARAM_ECW_MAX_OFFSET 12 1080#define AC_PARAM_ECW_MAX_OFFSET 12
1040#define AC_PARAM_ECW_MIN_OFFSET 8 1081#define AC_PARAM_ECW_MIN_OFFSET 8
1041#define AC_PARAM_AIFS_OFFSET 0 1082#define AC_PARAM_AIFS_OFFSET 0
@@ -1184,6 +1225,30 @@
1184 1225
1185#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) 1226#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
1186 1227
1228/* REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
1229/* Enable GPIO[9] as WiFi HW PDn source */
1230#define WL_HWPDN_EN BIT(0)
1231/* WiFi HW PDn polarity control */
1232#define WL_HWPDN_SL BIT(1)
1233/* WiFi function enable */
1234#define WL_FUNC_EN BIT(2)
1235/* Enable GPIO[9] as WiFi RF HW PDn source */
1236#define WL_HWROF_EN BIT(3)
1237/* Enable GPIO[11] as BT HW PDn source */
1238#define BT_HWPDN_EN BIT(16)
1239/* BT HW PDn polarity control */
1240#define BT_HWPDN_SL BIT(17)
1241/* BT function enable */
1242#define BT_FUNC_EN BIT(18)
1243/* Enable GPIO[11] as BT/GPS RF HW PDn source */
1244#define BT_HWROF_EN BIT(19)
1245/* Enable GPIO[10] as GPS HW PDn source */
1246#define GPS_HWPDN_EN BIT(20)
1247/* GPS HW PDn polarity control */
1248#define GPS_HWPDN_SL BIT(21)
1249/* GPS function enable */
1250#define GPS_FUNC_EN BIT(22)
1251
1187#define RPMAC_RESET 0x100 1252#define RPMAC_RESET 0x100
1188#define RPMAC_TXSTART 0x104 1253#define RPMAC_TXSTART 0x104
1189#define RPMAC_TXLEGACYSIG 0x108 1254#define RPMAC_TXLEGACYSIG 0x108
@@ -1496,7 +1561,7 @@
1496#define BTXHTSTBC 0x30 1561#define BTXHTSTBC 0x30
1497#define BTXHTADVANCECODING 0x40 1562#define BTXHTADVANCECODING 0x40
1498#define BTXHTSHORTGI 0x80 1563#define BTXHTSHORTGI 0x80
1499#define BTXHTNUMBERHT_LT F 0x300 1564#define BTXHTNUMBERHT_LTF 0x300
1500#define BTXHTCRC8 0x3fc00 1565#define BTXHTCRC8 0x3fc00
1501#define BCOUNTERRESET 0x10000 1566#define BCOUNTERRESET 0x10000
1502#define BNUMOFOFDMTX 0xffff 1567#define BNUMOFOFDMTX 0xffff
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index ffd8e04c4028..669b1168dbec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -61,7 +61,7 @@ void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
61 } 61 }
62} 62}
63 63
64void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 64void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
65 u8 *ppowerlevel) 65 u8 *ppowerlevel)
66{ 66{
67 struct rtl_priv *rtlpriv = rtl_priv(hw); 67 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -410,7 +410,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
410 } 410 }
411} 411}
412 412
413void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 413void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
414 u8 *ppowerlevel, u8 channel) 414 u8 *ppowerlevel, u8 channel)
415{ 415{
416 u32 writeVal[2], powerBase0[2], powerBase1[2]; 416 u32 writeVal[2], powerBase0[2], powerBase1[2];
@@ -430,7 +430,7 @@ void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
430 } 430 }
431} 431}
432 432
433bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw) 433bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw)
434{ 434{
435 struct rtl_priv *rtlpriv = rtl_priv(hw); 435 struct rtl_priv *rtlpriv = rtl_priv(hw);
436 struct rtl_phy *rtlphy = &(rtlpriv->phy); 436 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -484,11 +484,11 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
484 484
485 switch (rfpath) { 485 switch (rfpath) {
486 case RF90_PATH_A: 486 case RF90_PATH_A:
487 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, 487 rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
488 (enum radio_path) rfpath); 488 (enum radio_path) rfpath);
489 break; 489 break;
490 case RF90_PATH_B: 490 case RF90_PATH_B:
491 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, 491 rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
492 (enum radio_path) rfpath); 492 (enum radio_path) rfpath);
493 break; 493 break;
494 case RF90_PATH_C: 494 case RF90_PATH_C:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index d3014f99bb7b..3aa520c1c171 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -40,5 +40,8 @@ extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
40 u8 *ppowerlevel); 40 u8 *ppowerlevel);
41extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 41extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
42 u8 *ppowerlevel, u8 channel); 42 u8 *ppowerlevel, u8 channel);
43extern bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw); 43bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
44bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
45 enum radio_path rfpath);
46
44#endif 47#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b366e8862929..b1cc4d44f534 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -37,6 +37,7 @@
37#include "phy.h" 37#include "phy.h"
38#include "dm.h" 38#include "dm.h"
39#include "hw.h" 39#include "hw.h"
40#include "rf.h"
40#include "sw.h" 41#include "sw.h"
41#include "trx.h" 42#include "trx.h"
42#include "led.h" 43#include "led.h"
@@ -46,13 +47,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
46 struct rtl_priv *rtlpriv = rtl_priv(hw); 47 struct rtl_priv *rtlpriv = rtl_priv(hw);
47 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 48 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
48 49
49 rtlpriv->dm.b_dm_initialgain_enable = 1; 50 rtlpriv->dm.dm_initialgain_enable = 1;
50 rtlpriv->dm.dm_flag = 0; 51 rtlpriv->dm.dm_flag = 0;
51 rtlpriv->dm.b_disable_framebursting = 0;; 52 rtlpriv->dm.disable_framebursting = 0;
52 rtlpriv->dm.thermalvalue = 0; 53 rtlpriv->dm.thermalvalue = 0;
53 rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13); 54 rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
54 55
55 rtlpci->receive_config = (RCR_APPFCS | 56 rtlpci->receive_config = (RCR_APP_FCS |
56 RCR_AMF | 57 RCR_AMF |
57 RCR_ADF | 58 RCR_ADF |
58 RCR_APP_MIC | 59 RCR_APP_MIC |
@@ -122,7 +123,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
122 .switch_channel = rtl92c_phy_sw_chnl, 123 .switch_channel = rtl92c_phy_sw_chnl,
123 .dm_watchdog = rtl92c_dm_watchdog, 124 .dm_watchdog = rtl92c_dm_watchdog,
124 .scan_operation_backup = rtl92c_phy_scan_operation_backup, 125 .scan_operation_backup = rtl92c_phy_scan_operation_backup,
125 .set_rf_power_state = rtl92c_phy_set_rf_power_state, 126 .set_rf_power_state = rtl92ce_phy_set_rf_power_state,
126 .led_control = rtl92ce_led_control, 127 .led_control = rtl92ce_led_control,
127 .set_desc = rtl92ce_set_desc, 128 .set_desc = rtl92ce_set_desc,
128 .get_desc = rtl92ce_get_desc, 129 .get_desc = rtl92ce_get_desc,
@@ -133,8 +134,17 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
133 .deinit_sw_leds = rtl92ce_deinit_sw_leds, 134 .deinit_sw_leds = rtl92ce_deinit_sw_leds,
134 .get_bbreg = rtl92c_phy_query_bb_reg, 135 .get_bbreg = rtl92c_phy_query_bb_reg,
135 .set_bbreg = rtl92c_phy_set_bb_reg, 136 .set_bbreg = rtl92c_phy_set_bb_reg,
136 .get_rfreg = rtl92c_phy_query_rf_reg, 137 .get_rfreg = rtl92ce_phy_query_rf_reg,
137 .set_rfreg = rtl92c_phy_set_rf_reg, 138 .set_rfreg = rtl92ce_phy_set_rf_reg,
139 .cmd_send_packet = _rtl92c_cmd_send_packet,
140 .phy_rf6052_config = rtl92ce_phy_rf6052_config,
141 .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower,
142 .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower,
143 .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile,
144 .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile,
145 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
146 .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
147 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
138}; 148};
139 149
140static struct rtl_mod_params rtl92ce_mod_params = { 150static struct rtl_mod_params rtl92ce_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
index de1198c38d4e..36e657668c1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
@@ -33,5 +33,19 @@
33int rtl92c_init_sw_vars(struct ieee80211_hw *hw); 33int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
34void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw); 34void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
35void rtl92c_init_var_map(struct ieee80211_hw *hw); 35void rtl92c_init_var_map(struct ieee80211_hw *hw);
36bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
37 struct sk_buff *skb);
38void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
39 u8 *ppowerlevel);
40void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
41 u8 *ppowerlevel, u8 channel);
42bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
43 u8 configtype);
44bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
45 u8 configtype);
46void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
47u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
48 enum radio_path rfpath, u32 regaddr, u32 bitmask);
49void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
36 50
37#endif 51#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index bf5852f2d634..aa2b5815600f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -36,7 +36,7 @@
36#include "trx.h" 36#include "trx.h"
37#include "led.h" 37#include "led.h"
38 38
39static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(u16 fc, 39static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(__le16 fc,
40 unsigned int 40 unsigned int
41 skb_queue) 41 skb_queue)
42{ 42{
@@ -245,24 +245,24 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
245 struct rtl_stats *pstats, 245 struct rtl_stats *pstats,
246 struct rx_desc_92c *pdesc, 246 struct rx_desc_92c *pdesc,
247 struct rx_fwinfo_92c *p_drvinfo, 247 struct rx_fwinfo_92c *p_drvinfo,
248 bool bpacket_match_bssid, 248 bool packet_match_bssid,
249 bool bpacket_toself, 249 bool packet_toself,
250 bool b_packet_beacon) 250 bool packet_beacon)
251{ 251{
252 struct rtl_priv *rtlpriv = rtl_priv(hw); 252 struct rtl_priv *rtlpriv = rtl_priv(hw);
253 struct phy_sts_cck_8192s_t *cck_buf; 253 struct phy_sts_cck_8192s_t *cck_buf;
254 s8 rx_pwr_all, rx_pwr[4]; 254 s8 rx_pwr_all, rx_pwr[4];
255 u8 rf_rx_num, evm, pwdb_all; 255 u8 evm, pwdb_all, rf_rx_num = 0;
256 u8 i, max_spatial_stream; 256 u8 i, max_spatial_stream;
257 u32 rssi, total_rssi; 257 u32 rssi, total_rssi = 0;
258 bool is_cck_rate; 258 bool is_cck_rate;
259 259
260 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); 260 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
261 pstats->b_packet_matchbssid = bpacket_match_bssid; 261 pstats->packet_matchbssid = packet_match_bssid;
262 pstats->b_packet_toself = bpacket_toself; 262 pstats->packet_toself = packet_toself;
263 pstats->b_is_cck = is_cck_rate; 263 pstats->is_cck = is_cck_rate;
264 pstats->b_packet_beacon = b_packet_beacon; 264 pstats->packet_beacon = packet_beacon;
265 pstats->b_is_cck = is_cck_rate; 265 pstats->is_cck = is_cck_rate;
266 pstats->rx_mimo_signalquality[0] = -1; 266 pstats->rx_mimo_signalquality[0] = -1;
267 pstats->rx_mimo_signalquality[1] = -1; 267 pstats->rx_mimo_signalquality[1] = -1;
268 268
@@ -315,7 +315,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
315 pstats->rx_pwdb_all = pwdb_all; 315 pstats->rx_pwdb_all = pwdb_all;
316 pstats->recvsignalpower = rx_pwr_all; 316 pstats->recvsignalpower = rx_pwr_all;
317 317
318 if (bpacket_match_bssid) { 318 if (packet_match_bssid) {
319 u8 sq; 319 u8 sq;
320 if (pstats->rx_pwdb_all > 40) 320 if (pstats->rx_pwdb_all > 40)
321 sq = 100; 321 sq = 100;
@@ -334,10 +334,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
334 pstats->rx_mimo_signalquality[1] = -1; 334 pstats->rx_mimo_signalquality[1] = -1;
335 } 335 }
336 } else { 336 } else {
337 rtlpriv->dm.brfpath_rxenable[0] = 337 rtlpriv->dm.rfpath_rxenable[0] =
338 rtlpriv->dm.brfpath_rxenable[1] = true; 338 rtlpriv->dm.rfpath_rxenable[1] = true;
339 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) { 339 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
340 if (rtlpriv->dm.brfpath_rxenable[i]) 340 if (rtlpriv->dm.rfpath_rxenable[i])
341 rf_rx_num++; 341 rf_rx_num++;
342 342
343 rx_pwr[i] = 343 rx_pwr[i] =
@@ -347,7 +347,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
347 rtlpriv->stats.rx_snr_db[i] = 347 rtlpriv->stats.rx_snr_db[i] =
348 (long)(p_drvinfo->rxsnr[i] / 2); 348 (long)(p_drvinfo->rxsnr[i] / 2);
349 349
350 if (bpacket_match_bssid) 350 if (packet_match_bssid)
351 pstats->rx_mimo_signalstrength[i] = (u8) rssi; 351 pstats->rx_mimo_signalstrength[i] = (u8) rssi;
352 } 352 }
353 353
@@ -366,7 +366,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
366 for (i = 0; i < max_spatial_stream; i++) { 366 for (i = 0; i < max_spatial_stream; i++) {
367 evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]); 367 evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
368 368
369 if (bpacket_match_bssid) { 369 if (packet_match_bssid) {
370 if (i == 0) 370 if (i == 0)
371 pstats->signalquality = 371 pstats->signalquality =
372 (u8) (evm & 0xff); 372 (u8) (evm & 0xff);
@@ -393,7 +393,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
393 u8 rfpath; 393 u8 rfpath;
394 u32 last_rssi, tmpval; 394 u32 last_rssi, tmpval;
395 395
396 if (pstats->b_packet_toself || pstats->b_packet_beacon) { 396 if (pstats->packet_toself || pstats->packet_beacon) {
397 rtlpriv->stats.rssi_calculate_cnt++; 397 rtlpriv->stats.rssi_calculate_cnt++;
398 398
399 if (rtlpriv->stats.ui_rssi.total_num++ >= 399 if (rtlpriv->stats.ui_rssi.total_num++ >=
@@ -421,7 +421,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
421 pstats->rssi = rtlpriv->stats.signal_strength; 421 pstats->rssi = rtlpriv->stats.signal_strength;
422 } 422 }
423 423
424 if (!pstats->b_is_cck && pstats->b_packet_toself) { 424 if (!pstats->is_cck && pstats->packet_toself) {
425 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; 425 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
426 rfpath++) { 426 rfpath++) {
427 427
@@ -463,7 +463,7 @@ static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw,
463 struct rtl_stats *pstats) 463 struct rtl_stats *pstats)
464{ 464{
465 struct rtl_priv *rtlpriv = rtl_priv(hw); 465 struct rtl_priv *rtlpriv = rtl_priv(hw);
466 int weighting; 466 int weighting = 0;
467 467
468 if (rtlpriv->stats.recv_signal_power == 0) 468 if (rtlpriv->stats.recv_signal_power == 0)
469 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower; 469 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
@@ -493,7 +493,7 @@ static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
493 rtlpriv->dm.undecorated_smoothed_pwdb; 493 rtlpriv->dm.undecorated_smoothed_pwdb;
494 } 494 }
495 495
496 if (pstats->b_packet_toself || pstats->b_packet_beacon) { 496 if (pstats->packet_toself || pstats->packet_beacon) {
497 if (undecorated_smoothed_pwdb < 0) 497 if (undecorated_smoothed_pwdb < 0)
498 undecorated_smoothed_pwdb = pstats->rx_pwdb_all; 498 undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
499 499
@@ -525,7 +525,7 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
525 u32 last_evm, n_spatialstream, tmpval; 525 u32 last_evm, n_spatialstream, tmpval;
526 526
527 if (pstats->signalquality != 0) { 527 if (pstats->signalquality != 0) {
528 if (pstats->b_packet_toself || pstats->b_packet_beacon) { 528 if (pstats->packet_toself || pstats->packet_beacon) {
529 529
530 if (rtlpriv->stats.ui_link_quality.total_num++ >= 530 if (rtlpriv->stats.ui_link_quality.total_num++ >=
531 PHY_LINKQUALITY_SLID_WIN_MAX) { 531 PHY_LINKQUALITY_SLID_WIN_MAX) {
@@ -595,8 +595,8 @@ static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw,
595 struct rtl_stats *pcurrent_stats) 595 struct rtl_stats *pcurrent_stats)
596{ 596{
597 597
598 if (!pcurrent_stats->b_packet_matchbssid && 598 if (!pcurrent_stats->packet_matchbssid &&
599 !pcurrent_stats->b_packet_beacon) 599 !pcurrent_stats->packet_beacon)
600 return; 600 return;
601 601
602 _rtl92ce_process_ui_rssi(hw, pcurrent_stats); 602 _rtl92ce_process_ui_rssi(hw, pcurrent_stats);
@@ -617,34 +617,36 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
617 u8 *tmp_buf; 617 u8 *tmp_buf;
618 u8 *praddr; 618 u8 *praddr;
619 u8 *psaddr; 619 u8 *psaddr;
620 u16 fc, type; 620 __le16 fc;
621 bool b_packet_matchbssid, b_packet_toself, b_packet_beacon; 621 u16 type, c_fc;
622 bool packet_matchbssid, packet_toself, packet_beacon;
622 623
623 tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; 624 tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
624 625
625 hdr = (struct ieee80211_hdr *)tmp_buf; 626 hdr = (struct ieee80211_hdr *)tmp_buf;
626 fc = le16_to_cpu(hdr->frame_control); 627 fc = hdr->frame_control;
628 c_fc = le16_to_cpu(fc);
627 type = WLAN_FC_GET_TYPE(fc); 629 type = WLAN_FC_GET_TYPE(fc);
628 praddr = hdr->addr1; 630 praddr = hdr->addr1;
629 psaddr = hdr->addr2; 631 psaddr = hdr->addr2;
630 632
631 b_packet_matchbssid = 633 packet_matchbssid =
632 ((IEEE80211_FTYPE_CTL != type) && 634 ((IEEE80211_FTYPE_CTL != type) &&
633 (!compare_ether_addr(mac->bssid, 635 (!compare_ether_addr(mac->bssid,
634 (fc & IEEE80211_FCTL_TODS) ? 636 (c_fc & IEEE80211_FCTL_TODS) ?
635 hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? 637 hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ?
636 hdr->addr2 : hdr->addr3)) && 638 hdr->addr2 : hdr->addr3)) &&
637 (!pstats->b_hwerror) && (!pstats->b_crc) && (!pstats->b_icv)); 639 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
638 640
639 b_packet_toself = b_packet_matchbssid && 641 packet_toself = packet_matchbssid &&
640 (!compare_ether_addr(praddr, rtlefuse->dev_addr)); 642 (!compare_ether_addr(praddr, rtlefuse->dev_addr));
641 643
642 if (ieee80211_is_beacon(fc)) 644 if (ieee80211_is_beacon(fc))
643 b_packet_beacon = true; 645 packet_beacon = true;
644 646
645 _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, 647 _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
646 b_packet_matchbssid, b_packet_toself, 648 packet_matchbssid, packet_toself,
647 b_packet_beacon); 649 packet_beacon);
648 650
649 _rtl92ce_process_phyinfo(hw, tmp_buf, pstats); 651 _rtl92ce_process_phyinfo(hw, tmp_buf, pstats);
650} 652}
@@ -662,14 +664,14 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
662 stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * 664 stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
663 RX_DRV_INFO_SIZE_UNIT; 665 RX_DRV_INFO_SIZE_UNIT;
664 stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); 666 stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
665 stats->b_icv = (u16) GET_RX_DESC_ICV(pdesc); 667 stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
666 stats->b_crc = (u16) GET_RX_DESC_CRC32(pdesc); 668 stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
667 stats->b_hwerror = (stats->b_crc | stats->b_icv); 669 stats->hwerror = (stats->crc | stats->icv);
668 stats->decrypted = !GET_RX_DESC_SWDEC(pdesc); 670 stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
669 stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc); 671 stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
670 stats->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); 672 stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
671 stats->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); 673 stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
672 stats->b_isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) 674 stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
673 && (GET_RX_DESC_FAGGR(pdesc) == 1)); 675 && (GET_RX_DESC_FAGGR(pdesc) == 1));
674 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); 676 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
675 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); 677 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
@@ -689,7 +691,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
689 if (GET_RX_DESC_RXHT(pdesc)) 691 if (GET_RX_DESC_RXHT(pdesc))
690 rx_status->flag |= RX_FLAG_HT; 692 rx_status->flag |= RX_FLAG_HT;
691 693
692 rx_status->flag |= RX_FLAG_TSFT; 694 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
693 695
694 if (stats->decrypted) 696 if (stats->decrypted)
695 rx_status->flag |= RX_FLAG_DECRYPTED; 697 rx_status->flag |= RX_FLAG_DECRYPTED;
@@ -727,27 +729,24 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
727 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 729 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
728 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 730 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
729 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 731 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
730 bool b_defaultadapter = true; 732 bool defaultadapter = true;
731 733 struct ieee80211_sta *sta;
732 struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid);
733
734 u8 *pdesc = (u8 *) pdesc_tx; 734 u8 *pdesc = (u8 *) pdesc_tx;
735 struct rtl_tcb_desc tcb_desc; 735 struct rtl_tcb_desc tcb_desc;
736 u8 *qc = ieee80211_get_qos_ctl(hdr); 736 u8 *qc = ieee80211_get_qos_ctl(hdr);
737 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 737 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
738 u16 seq_number; 738 u16 seq_number;
739 u16 fc = le16_to_cpu(hdr->frame_control); 739 __le16 fc = hdr->frame_control;
740 u8 rate_flag = info->control.rates[0].flags; 740 u8 rate_flag = info->control.rates[0].flags;
741 741
742 enum rtl_desc_qsel fw_qsel = 742 enum rtl_desc_qsel fw_qsel =
743 _rtl92ce_map_hwqueue_to_fwqueue(le16_to_cpu(hdr->frame_control), 743 _rtl92ce_map_hwqueue_to_fwqueue(fc, queue_index);
744 queue_index);
745 744
746 bool b_firstseg = ((hdr->seq_ctrl & 745 bool firstseg = ((hdr->seq_ctrl &
747 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); 746 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
748 747
749 bool b_lastseg = ((hdr->frame_control & 748 bool lastseg = ((hdr->frame_control &
750 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); 749 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
751 750
752 dma_addr_t mapping = pci_map_single(rtlpci->pdev, 751 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
753 skb->data, skb->len, 752 skb->data, skb->len,
@@ -759,7 +758,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
759 758
760 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c)); 759 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
761 760
762 if (b_firstseg) { 761 if (firstseg) {
763 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); 762 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
764 763
765 SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate); 764 SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate);
@@ -774,25 +773,25 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
774 } 773 }
775 SET_TX_DESC_SEQ(pdesc, seq_number); 774 SET_TX_DESC_SEQ(pdesc, seq_number);
776 775
777 SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.b_rts_enable && 776 SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.rts_enable &&
778 !tcb_desc. 777 !tcb_desc.
779 b_cts_enable) ? 1 : 0)); 778 cts_enable) ? 1 : 0));
780 SET_TX_DESC_HW_RTS_ENABLE(pdesc, 779 SET_TX_DESC_HW_RTS_ENABLE(pdesc,
781 ((tcb_desc.b_rts_enable 780 ((tcb_desc.rts_enable
782 || tcb_desc.b_cts_enable) ? 1 : 0)); 781 || tcb_desc.cts_enable) ? 1 : 0));
783 SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.b_cts_enable) ? 1 : 0)); 782 SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.cts_enable) ? 1 : 0));
784 SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.b_rts_stbc) ? 1 : 0)); 783 SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
785 784
786 SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate); 785 SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate);
787 SET_TX_DESC_RTS_BW(pdesc, 0); 786 SET_TX_DESC_RTS_BW(pdesc, 0);
788 SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc); 787 SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc);
789 SET_TX_DESC_RTS_SHORT(pdesc, 788 SET_TX_DESC_RTS_SHORT(pdesc,
790 ((tcb_desc.rts_rate <= DESC92C_RATE54M) ? 789 ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
791 (tcb_desc.b_rts_use_shortpreamble ? 1 : 0) 790 (tcb_desc.rts_use_shortpreamble ? 1 : 0)
792 : (tcb_desc.b_rts_use_shortgi ? 1 : 0))); 791 : (tcb_desc.rts_use_shortgi ? 1 : 0)));
793 792
794 if (mac->bw_40) { 793 if (mac->bw_40) {
795 if (tcb_desc.b_packet_bw) { 794 if (tcb_desc.packet_bw) {
796 SET_TX_DESC_DATA_BW(pdesc, 1); 795 SET_TX_DESC_DATA_BW(pdesc, 1);
797 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); 796 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
798 } else { 797 } else {
@@ -811,10 +810,13 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
811 SET_TX_DESC_LINIP(pdesc, 0); 810 SET_TX_DESC_LINIP(pdesc, 0);
812 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len); 811 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
813 812
813 rcu_read_lock();
814 sta = ieee80211_find_sta(mac->vif, mac->bssid);
814 if (sta) { 815 if (sta) {
815 u8 ampdu_density = sta->ht_cap.ampdu_density; 816 u8 ampdu_density = sta->ht_cap.ampdu_density;
816 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); 817 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
817 } 818 }
819 rcu_read_unlock();
818 820
819 if (info->control.hw_key) { 821 if (info->control.hw_key) {
820 struct ieee80211_key_conf *keyconf = 822 struct ieee80211_key_conf *keyconf =
@@ -854,14 +856,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
854 } 856 }
855 } 857 }
856 858
857 SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0)); 859 SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
858 SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0)); 860 SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
859 861
860 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len); 862 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
861 863
862 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 864 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
863 865
864 if (rtlpriv->dm.b_useramask) { 866 if (rtlpriv->dm.useramask) {
865 SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index); 867 SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index);
866 SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id); 868 SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id);
867 } else { 869 } else {
@@ -869,16 +871,16 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
869 SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index); 871 SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index);
870 } 872 }
871 873
872 if ((!ieee80211_is_data_qos(fc)) && ppsc->b_leisure_ps && 874 if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
873 ppsc->b_fwctrl_lps) { 875 ppsc->fwctrl_lps) {
874 SET_TX_DESC_HWSEQ_EN(pdesc, 1); 876 SET_TX_DESC_HWSEQ_EN(pdesc, 1);
875 SET_TX_DESC_PKT_ID(pdesc, 8); 877 SET_TX_DESC_PKT_ID(pdesc, 8);
876 878
877 if (!b_defaultadapter) 879 if (!defaultadapter)
878 SET_TX_DESC_QOS(pdesc, 1); 880 SET_TX_DESC_QOS(pdesc, 1);
879 } 881 }
880 882
881 SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1)); 883 SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
882 884
883 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || 885 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
884 is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { 886 is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
@@ -889,8 +891,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
889} 891}
890 892
891void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, 893void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
892 u8 *pdesc, bool b_firstseg, 894 u8 *pdesc, bool firstseg,
893 bool b_lastseg, struct sk_buff *skb) 895 bool lastseg, struct sk_buff *skb)
894{ 896{
895 struct rtl_priv *rtlpriv = rtl_priv(hw); 897 struct rtl_priv *rtlpriv = rtl_priv(hw);
896 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 898 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -901,11 +903,11 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
901 PCI_DMA_TODEVICE); 903 PCI_DMA_TODEVICE);
902 904
903 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 905 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
904 u16 fc = le16_to_cpu(hdr->frame_control); 906 __le16 fc = hdr->frame_control;
905 907
906 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); 908 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
907 909
908 if (b_firstseg) 910 if (firstseg)
909 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); 911 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
910 912
911 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); 913 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
@@ -1029,3 +1031,36 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue)
1029 BIT(0) << (hw_queue)); 1031 BIT(0) << (hw_queue));
1030 } 1032 }
1031} 1033}
1034
1035bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
1036 struct sk_buff *skb)
1037{
1038 struct rtl_priv *rtlpriv = rtl_priv(hw);
1039 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1040 struct rtl8192_tx_ring *ring;
1041 struct rtl_tx_desc *pdesc;
1042 u8 own;
1043 unsigned long flags;
1044 struct sk_buff *pskb = NULL;
1045
1046 ring = &rtlpci->tx_ring[BEACON_QUEUE];
1047
1048 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1049
1050 pskb = __skb_dequeue(&ring->queue);
1051 if (pskb)
1052 kfree_skb(pskb);
1053
1054 pdesc = &ring->desc[0];
1055 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
1056
1057 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
1058
1059 __skb_queue_tail(&ring->queue, skb);
1060
1061 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1062
1063 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
1064
1065 return true;
1066}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index 53d0e0a5af5c..803adcc80c96 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -40,470 +40,494 @@
40#define USB_HWDESC_HEADER_LEN 32 40#define USB_HWDESC_HEADER_LEN 32
41#define CRCLENGTH 4 41#define CRCLENGTH 4
42 42
43/* Define a macro that takes a le32 word, converts it to host ordering,
44 * right shifts by a specified count, creates a mask of the specified
45 * bit count, and extracts that number of bits.
46 */
47
48#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
49 ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
50 BIT_LEN_MASK_32(__mask))
51
52/* Define a macro that clears a bit field in an le32 word and
53 * sets the specified value into that bit field. The resulting
54 * value remains in le32 ordering; however, it is properly converted
55 * to host ordering for the clear and set operations before conversion
56 * back to le32.
57 */
58
59#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
60 (*(__le32 *)(__pdesc) = \
61 (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
62 (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
63 (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
64
65/* macros to read/write various fields in RX or TX descriptors */
66
43#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ 67#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
44 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) 68 SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
45#define SET_TX_DESC_OFFSET(__pdesc, __val) \ 69#define SET_TX_DESC_OFFSET(__pdesc, __val) \
46 SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) 70 SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
47#define SET_TX_DESC_BMC(__pdesc, __val) \ 71#define SET_TX_DESC_BMC(__pdesc, __val) \
48 SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) 72 SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
49#define SET_TX_DESC_HTC(__pdesc, __val) \ 73#define SET_TX_DESC_HTC(__pdesc, __val) \
50 SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) 74 SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
51#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ 75#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
52 SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) 76 SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
53#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ 77#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
54 SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) 78 SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
55#define SET_TX_DESC_LINIP(__pdesc, __val) \ 79#define SET_TX_DESC_LINIP(__pdesc, __val) \
56 SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) 80 SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
57#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ 81#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
58 SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) 82 SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
59#define SET_TX_DESC_GF(__pdesc, __val) \ 83#define SET_TX_DESC_GF(__pdesc, __val) \
60 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) 84 SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
61#define SET_TX_DESC_OWN(__pdesc, __val) \ 85#define SET_TX_DESC_OWN(__pdesc, __val) \
62 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) 86 SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
63 87
64#define GET_TX_DESC_PKT_SIZE(__pdesc) \ 88#define GET_TX_DESC_PKT_SIZE(__pdesc) \
65 LE_BITS_TO_4BYTE(__pdesc, 0, 16) 89 SHIFT_AND_MASK_LE(__pdesc, 0, 16)
66#define GET_TX_DESC_OFFSET(__pdesc) \ 90#define GET_TX_DESC_OFFSET(__pdesc) \
67 LE_BITS_TO_4BYTE(__pdesc, 16, 8) 91 SHIFT_AND_MASK_LE(__pdesc, 16, 8)
68#define GET_TX_DESC_BMC(__pdesc) \ 92#define GET_TX_DESC_BMC(__pdesc) \
69 LE_BITS_TO_4BYTE(__pdesc, 24, 1) 93 SHIFT_AND_MASK_LE(__pdesc, 24, 1)
70#define GET_TX_DESC_HTC(__pdesc) \ 94#define GET_TX_DESC_HTC(__pdesc) \
71 LE_BITS_TO_4BYTE(__pdesc, 25, 1) 95 SHIFT_AND_MASK_LE(__pdesc, 25, 1)
72#define GET_TX_DESC_LAST_SEG(__pdesc) \ 96#define GET_TX_DESC_LAST_SEG(__pdesc) \
73 LE_BITS_TO_4BYTE(__pdesc, 26, 1) 97 SHIFT_AND_MASK_LE(__pdesc, 26, 1)
74#define GET_TX_DESC_FIRST_SEG(__pdesc) \ 98#define GET_TX_DESC_FIRST_SEG(__pdesc) \
75 LE_BITS_TO_4BYTE(__pdesc, 27, 1) 99 SHIFT_AND_MASK_LE(__pdesc, 27, 1)
76#define GET_TX_DESC_LINIP(__pdesc) \ 100#define GET_TX_DESC_LINIP(__pdesc) \
77 LE_BITS_TO_4BYTE(__pdesc, 28, 1) 101 SHIFT_AND_MASK_LE(__pdesc, 28, 1)
78#define GET_TX_DESC_NO_ACM(__pdesc) \ 102#define GET_TX_DESC_NO_ACM(__pdesc) \
79 LE_BITS_TO_4BYTE(__pdesc, 29, 1) 103 SHIFT_AND_MASK_LE(__pdesc, 29, 1)
80#define GET_TX_DESC_GF(__pdesc) \ 104#define GET_TX_DESC_GF(__pdesc) \
81 LE_BITS_TO_4BYTE(__pdesc, 30, 1) 105 SHIFT_AND_MASK_LE(__pdesc, 30, 1)
82#define GET_TX_DESC_OWN(__pdesc) \ 106#define GET_TX_DESC_OWN(__pdesc) \
83 LE_BITS_TO_4BYTE(__pdesc, 31, 1) 107 SHIFT_AND_MASK_LE(__pdesc, 31, 1)
84 108
85#define SET_TX_DESC_MACID(__pdesc, __val) \ 109#define SET_TX_DESC_MACID(__pdesc, __val) \
86 SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val) 110 SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
87#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ 111#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
88 SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val) 112 SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
89#define SET_TX_DESC_BK(__pdesc, __val) \ 113#define SET_TX_DESC_BK(__pdesc, __val) \
90 SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val) 114 SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
91#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ 115#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
92 SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val) 116 SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
93#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ 117#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
94 SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) 118 SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
95#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ 119#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
96 SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) 120 SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
97#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ 121#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
98 SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) 122 SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
99#define SET_TX_DESC_PIFS(__pdesc, __val) \ 123#define SET_TX_DESC_PIFS(__pdesc, __val) \
100 SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) 124 SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
101#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ 125#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
102 SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val) 126 SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
103#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ 127#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
104 SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val) 128 SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
105#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ 129#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
106 SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) 130 SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
107#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ 131#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
108 SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) 132 SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
109#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ 133#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
110 SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val) 134 SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val)
111 135
112#define GET_TX_DESC_MACID(__pdesc) \ 136#define GET_TX_DESC_MACID(__pdesc) \
113 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) 137 SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
114#define GET_TX_DESC_AGG_ENABLE(__pdesc) \ 138#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
115 LE_BITS_TO_4BYTE(__pdesc+4, 5, 1) 139 SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
116#define GET_TX_DESC_AGG_BREAK(__pdesc) \ 140#define GET_TX_DESC_AGG_BREAK(__pdesc) \
117 LE_BITS_TO_4BYTE(__pdesc+4, 6, 1) 141 SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
118#define GET_TX_DESC_RDG_ENABLE(__pdesc) \ 142#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
119 LE_BITS_TO_4BYTE(__pdesc+4, 7, 1) 143 SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
120#define GET_TX_DESC_QUEUE_SEL(__pdesc) \ 144#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
121 LE_BITS_TO_4BYTE(__pdesc+4, 8, 5) 145 SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
122#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ 146#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
123 LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) 147 SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
124#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ 148#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
125 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) 149 SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
126#define GET_TX_DESC_PIFS(__pdesc) \ 150#define GET_TX_DESC_PIFS(__pdesc) \
127 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) 151 SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
128#define GET_TX_DESC_RATE_ID(__pdesc) \ 152#define GET_TX_DESC_RATE_ID(__pdesc) \
129 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) 153 SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
130#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ 154#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
131 LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) 155 SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
132#define GET_TX_DESC_EN_DESC_ID(__pdesc) \ 156#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
133 LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) 157 SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
134#define GET_TX_DESC_SEC_TYPE(__pdesc) \ 158#define GET_TX_DESC_SEC_TYPE(__pdesc) \
135 LE_BITS_TO_4BYTE(__pdesc+4, 22, 2) 159 SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
136#define GET_TX_DESC_PKT_OFFSET(__pdesc) \ 160#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
137 LE_BITS_TO_4BYTE(__pdesc+4, 24, 8) 161 SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
138 162
139#define SET_TX_DESC_RTS_RC(__pdesc, __val) \ 163#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
140 SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val) 164 SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
141#define SET_TX_DESC_DATA_RC(__pdesc, __val) \ 165#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
142 SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val) 166 SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
143#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ 167#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
144 SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) 168 SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
145#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ 169#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
146 SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) 170 SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
147#define SET_TX_DESC_RAW(__pdesc, __val) \ 171#define SET_TX_DESC_RAW(__pdesc, __val) \
148 SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) 172 SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
149#define SET_TX_DESC_CCX(__pdesc, __val) \ 173#define SET_TX_DESC_CCX(__pdesc, __val) \
150 SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) 174 SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
151#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ 175#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
152 SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) 176 SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
153#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \ 177#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
154 SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val) 178 SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
155#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \ 179#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
156 SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val) 180 SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
157#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \ 181#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
158 SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val) 182 SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
159#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \ 183#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
160 SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val) 184 SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
161#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \ 185#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
162 SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val) 186 SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
163 187
164#define GET_TX_DESC_RTS_RC(__pdesc) \ 188#define GET_TX_DESC_RTS_RC(__pdesc) \
165 LE_BITS_TO_4BYTE(__pdesc+8, 0, 6) 189 SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
166#define GET_TX_DESC_DATA_RC(__pdesc) \ 190#define GET_TX_DESC_DATA_RC(__pdesc) \
167 LE_BITS_TO_4BYTE(__pdesc+8, 6, 6) 191 SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
168#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \ 192#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
169 LE_BITS_TO_4BYTE(__pdesc+8, 14, 2) 193 SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
170#define GET_TX_DESC_MORE_FRAG(__pdesc) \ 194#define GET_TX_DESC_MORE_FRAG(__pdesc) \
171 LE_BITS_TO_4BYTE(__pdesc+8, 17, 1) 195 SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
172#define GET_TX_DESC_RAW(__pdesc) \ 196#define GET_TX_DESC_RAW(__pdesc) \
173 LE_BITS_TO_4BYTE(__pdesc+8, 18, 1) 197 SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
174#define GET_TX_DESC_CCX(__pdesc) \ 198#define GET_TX_DESC_CCX(__pdesc) \
175 LE_BITS_TO_4BYTE(__pdesc+8, 19, 1) 199 SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
176#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \ 200#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
177 LE_BITS_TO_4BYTE(__pdesc+8, 20, 3) 201 SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
178#define GET_TX_DESC_ANTSEL_A(__pdesc) \ 202#define GET_TX_DESC_ANTSEL_A(__pdesc) \
179 LE_BITS_TO_4BYTE(__pdesc+8, 24, 1) 203 SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
180#define GET_TX_DESC_ANTSEL_B(__pdesc) \ 204#define GET_TX_DESC_ANTSEL_B(__pdesc) \
181 LE_BITS_TO_4BYTE(__pdesc+8, 25, 1) 205 SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
182#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \ 206#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
183 LE_BITS_TO_4BYTE(__pdesc+8, 26, 2) 207 SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
184#define GET_TX_DESC_TX_ANTL(__pdesc) \ 208#define GET_TX_DESC_TX_ANTL(__pdesc) \
185 LE_BITS_TO_4BYTE(__pdesc+8, 28, 2) 209 SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
186#define GET_TX_DESC_TX_ANT_HT(__pdesc) \ 210#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
187 LE_BITS_TO_4BYTE(__pdesc+8, 30, 2) 211 SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
188 212
189#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \ 213#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
190 SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val) 214 SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
191#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \ 215#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
192 SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val) 216 SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
193#define SET_TX_DESC_SEQ(__pdesc, __val) \ 217#define SET_TX_DESC_SEQ(__pdesc, __val) \
194 SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val) 218 SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
195#define SET_TX_DESC_PKT_ID(__pdesc, __val) \ 219#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
196 SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val) 220 SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
197 221
198#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \ 222#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
199 LE_BITS_TO_4BYTE(__pdesc+12, 0, 8) 223 SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
200#define GET_TX_DESC_TAIL_PAGE(__pdesc) \ 224#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
201 LE_BITS_TO_4BYTE(__pdesc+12, 8, 8) 225 SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
202#define GET_TX_DESC_SEQ(__pdesc) \ 226#define GET_TX_DESC_SEQ(__pdesc) \
203 LE_BITS_TO_4BYTE(__pdesc+12, 16, 12) 227 SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
204#define GET_TX_DESC_PKT_ID(__pdesc) \ 228#define GET_TX_DESC_PKT_ID(__pdesc) \
205 LE_BITS_TO_4BYTE(__pdesc+12, 28, 4) 229 SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
206 230
207#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ 231#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
208 SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val) 232 SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
209#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \ 233#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
210 SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val) 234 SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
211#define SET_TX_DESC_QOS(__pdesc, __val) \ 235#define SET_TX_DESC_QOS(__pdesc, __val) \
212 SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val) 236 SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
213#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ 237#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
214 SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val) 238 SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
215#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ 239#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
216 SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val) 240 SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
217#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ 241#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
218 SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val) 242 SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
219#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ 243#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
220 SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val) 244 SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
221#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ 245#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
222 SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val) 246 SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
223#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ 247#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
224 SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val) 248 SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
225#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ 249#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
226 SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val) 250 SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
227#define SET_TX_DESC_PORT_ID(__pdesc, __val) \ 251#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
228 SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val) 252 SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
229#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \ 253#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
230 SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val) 254 SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
231#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \ 255#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
232 SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val) 256 SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
233#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ 257#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
234 SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val) 258 SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
235#define SET_TX_DESC_TX_STBC(__pdesc, __val) \ 259#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
236 SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val) 260 SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
237#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \ 261#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
238 SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val) 262 SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
239#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ 263#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
240 SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val) 264 SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
241#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ 265#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
242 SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val) 266 SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
243#define SET_TX_DESC_RTS_BW(__pdesc, __val) \ 267#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
244 SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val) 268 SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
245#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ 269#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
246 SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val) 270 SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
247#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \ 271#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
248 SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val) 272 SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
249 273
250#define GET_TX_DESC_RTS_RATE(__pdesc) \ 274#define GET_TX_DESC_RTS_RATE(__pdesc) \
251 LE_BITS_TO_4BYTE(__pdesc+16, 0, 5) 275 SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
252#define GET_TX_DESC_AP_DCFE(__pdesc) \ 276#define GET_TX_DESC_AP_DCFE(__pdesc) \
253 LE_BITS_TO_4BYTE(__pdesc+16, 5, 1) 277 SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
254#define GET_TX_DESC_QOS(__pdesc) \ 278#define GET_TX_DESC_QOS(__pdesc) \
255 LE_BITS_TO_4BYTE(__pdesc+16, 6, 1) 279 SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
256#define GET_TX_DESC_HWSEQ_EN(__pdesc) \ 280#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
257 LE_BITS_TO_4BYTE(__pdesc+16, 7, 1) 281 SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
258#define GET_TX_DESC_USE_RATE(__pdesc) \ 282#define GET_TX_DESC_USE_RATE(__pdesc) \
259 LE_BITS_TO_4BYTE(__pdesc+16, 8, 1) 283 SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
260#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \ 284#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
261 LE_BITS_TO_4BYTE(__pdesc+16, 9, 1) 285 SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
262#define GET_TX_DESC_DISABLE_FB(__pdesc) \ 286#define GET_TX_DESC_DISABLE_FB(__pdesc) \
263 LE_BITS_TO_4BYTE(__pdesc+16, 10, 1) 287 SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
264#define GET_TX_DESC_CTS2SELF(__pdesc) \ 288#define GET_TX_DESC_CTS2SELF(__pdesc) \
265 LE_BITS_TO_4BYTE(__pdesc+16, 11, 1) 289 SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
266#define GET_TX_DESC_RTS_ENABLE(__pdesc) \ 290#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
267 LE_BITS_TO_4BYTE(__pdesc+16, 12, 1) 291 SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
268#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \ 292#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
269 LE_BITS_TO_4BYTE(__pdesc+16, 13, 1) 293 SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
270#define GET_TX_DESC_PORT_ID(__pdesc) \ 294#define GET_TX_DESC_PORT_ID(__pdesc) \
271 LE_BITS_TO_4BYTE(__pdesc+16, 14, 1) 295 SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
272#define GET_TX_DESC_WAIT_DCTS(__pdesc) \ 296#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
273 LE_BITS_TO_4BYTE(__pdesc+16, 18, 1) 297 SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
274#define GET_TX_DESC_CTS2AP_EN(__pdesc) \ 298#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
275 LE_BITS_TO_4BYTE(__pdesc+16, 19, 1) 299 SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
276#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \ 300#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
277 LE_BITS_TO_4BYTE(__pdesc+16, 20, 2) 301 SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
278#define GET_TX_DESC_TX_STBC(__pdesc) \ 302#define GET_TX_DESC_TX_STBC(__pdesc) \
279 LE_BITS_TO_4BYTE(__pdesc+16, 22, 2) 303 SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
280#define GET_TX_DESC_DATA_SHORT(__pdesc) \ 304#define GET_TX_DESC_DATA_SHORT(__pdesc) \
281 LE_BITS_TO_4BYTE(__pdesc+16, 24, 1) 305 SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
282#define GET_TX_DESC_DATA_BW(__pdesc) \ 306#define GET_TX_DESC_DATA_BW(__pdesc) \
283 LE_BITS_TO_4BYTE(__pdesc+16, 25, 1) 307 SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
284#define GET_TX_DESC_RTS_SHORT(__pdesc) \ 308#define GET_TX_DESC_RTS_SHORT(__pdesc) \
285 LE_BITS_TO_4BYTE(__pdesc+16, 26, 1) 309 SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
286#define GET_TX_DESC_RTS_BW(__pdesc) \ 310#define GET_TX_DESC_RTS_BW(__pdesc) \
287 LE_BITS_TO_4BYTE(__pdesc+16, 27, 1) 311 SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
288#define GET_TX_DESC_RTS_SC(__pdesc) \ 312#define GET_TX_DESC_RTS_SC(__pdesc) \
289 LE_BITS_TO_4BYTE(__pdesc+16, 28, 2) 313 SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
290#define GET_TX_DESC_RTS_STBC(__pdesc) \ 314#define GET_TX_DESC_RTS_STBC(__pdesc) \
291 LE_BITS_TO_4BYTE(__pdesc+16, 30, 2) 315 SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
292 316
293#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ 317#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
294 SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val) 318 SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
295#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ 319#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
296 SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val) 320 SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
297#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ 321#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
298 SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) 322 SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
299#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ 323#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
300 SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val) 324 SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
301#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ 325#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
302 SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) 326 SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
303#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ 327#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
304 SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val) 328 SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
305#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ 329#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
306 SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val) 330 SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
307#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \ 331#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
308 SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val) 332 SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
309 333
310#define GET_TX_DESC_TX_RATE(__pdesc) \ 334#define GET_TX_DESC_TX_RATE(__pdesc) \
311 LE_BITS_TO_4BYTE(__pdesc+20, 0, 6) 335 SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
312#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \ 336#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
313 LE_BITS_TO_4BYTE(__pdesc+20, 6, 1) 337 SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
314#define GET_TX_DESC_CCX_TAG(__pdesc) \ 338#define GET_TX_DESC_CCX_TAG(__pdesc) \
315 LE_BITS_TO_4BYTE(__pdesc+20, 7, 1) 339 SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
316#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \ 340#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
317 LE_BITS_TO_4BYTE(__pdesc+20, 8, 5) 341 SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
318#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \ 342#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
319 LE_BITS_TO_4BYTE(__pdesc+20, 13, 4) 343 SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
320#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \ 344#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
321 LE_BITS_TO_4BYTE(__pdesc+20, 17, 1) 345 SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
322#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \ 346#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
323 LE_BITS_TO_4BYTE(__pdesc+20, 18, 6) 347 SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
324#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \ 348#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
325 LE_BITS_TO_4BYTE(__pdesc+20, 24, 8) 349 SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
326 350
327#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \ 351#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
328 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val) 352 SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
329#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \ 353#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
330 SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val) 354 SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
331#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ 355#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
332 SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val) 356 SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
333#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ 357#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
334 SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val) 358 SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
335#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \ 359#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
336 SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val) 360 SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
337#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \ 361#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
338 SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val) 362 SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
339#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \ 363#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
340 SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val) 364 SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
341#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \ 365#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
342 SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val) 366 SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
343 367
344#define GET_TX_DESC_TXAGC_A(__pdesc) \ 368#define GET_TX_DESC_TXAGC_A(__pdesc) \
345 LE_BITS_TO_4BYTE(__pdesc+24, 0, 5) 369 SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
346#define GET_TX_DESC_TXAGC_B(__pdesc) \ 370#define GET_TX_DESC_TXAGC_B(__pdesc) \
347 LE_BITS_TO_4BYTE(__pdesc+24, 5, 5) 371 SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
348#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \ 372#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
349 LE_BITS_TO_4BYTE(__pdesc+24, 10, 1) 373 SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
350#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \ 374#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
351 LE_BITS_TO_4BYTE(__pdesc+24, 11, 5) 375 SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
352#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \ 376#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
353 LE_BITS_TO_4BYTE(__pdesc+24, 16, 4) 377 SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
354#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \ 378#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
355 LE_BITS_TO_4BYTE(__pdesc+24, 20, 4) 379 SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
356#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \ 380#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
357 LE_BITS_TO_4BYTE(__pdesc+24, 24, 4) 381 SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
358#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \ 382#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
359 LE_BITS_TO_4BYTE(__pdesc+24, 28, 4) 383 SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
360 384
361#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ 385#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
362 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) 386 SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
363#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \ 387#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
364 SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val) 388 SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
365#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \ 389#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
366 SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val) 390 SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
367#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \ 391#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
368 SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val) 392 SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
369#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \ 393#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
370 SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val) 394 SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
371 395
372#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ 396#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
373 LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) 397 SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
374#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \ 398#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
375 LE_BITS_TO_4BYTE(__pdesc+28, 16, 4) 399 SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
376#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \ 400#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
377 LE_BITS_TO_4BYTE(__pdesc+28, 20, 4) 401 SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
378#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \ 402#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
379 LE_BITS_TO_4BYTE(__pdesc+28, 24, 4) 403 SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
380#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \ 404#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
381 LE_BITS_TO_4BYTE(__pdesc+28, 28, 4) 405 SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
382 406
383#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ 407#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
384 SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val) 408 SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
385#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \ 409#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
386 SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val) 410 SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
387 411
388#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ 412#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
389 LE_BITS_TO_4BYTE(__pdesc+32, 0, 32) 413 SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
390#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \ 414#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
391 LE_BITS_TO_4BYTE(__pdesc+36, 0, 32) 415 SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
392 416
393#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ 417#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
394 SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) 418 SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
395#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \ 419#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
396 SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val) 420 SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
397 421
398#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ 422#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
399 LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) 423 SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
400#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \ 424#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
401 LE_BITS_TO_4BYTE(__pdesc+44, 0, 32) 425 SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
402 426
403#define GET_RX_DESC_PKT_LEN(__pdesc) \ 427#define GET_RX_DESC_PKT_LEN(__pdesc) \
404 LE_BITS_TO_4BYTE(__pdesc, 0, 14) 428 SHIFT_AND_MASK_LE(__pdesc, 0, 14)
405#define GET_RX_DESC_CRC32(__pdesc) \ 429#define GET_RX_DESC_CRC32(__pdesc) \
406 LE_BITS_TO_4BYTE(__pdesc, 14, 1) 430 SHIFT_AND_MASK_LE(__pdesc, 14, 1)
407#define GET_RX_DESC_ICV(__pdesc) \ 431#define GET_RX_DESC_ICV(__pdesc) \
408 LE_BITS_TO_4BYTE(__pdesc, 15, 1) 432 SHIFT_AND_MASK_LE(__pdesc, 15, 1)
409#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ 433#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
410 LE_BITS_TO_4BYTE(__pdesc, 16, 4) 434 SHIFT_AND_MASK_LE(__pdesc, 16, 4)
411#define GET_RX_DESC_SECURITY(__pdesc) \ 435#define GET_RX_DESC_SECURITY(__pdesc) \
412 LE_BITS_TO_4BYTE(__pdesc, 20, 3) 436 SHIFT_AND_MASK_LE(__pdesc, 20, 3)
413#define GET_RX_DESC_QOS(__pdesc) \ 437#define GET_RX_DESC_QOS(__pdesc) \
414 LE_BITS_TO_4BYTE(__pdesc, 23, 1) 438 SHIFT_AND_MASK_LE(__pdesc, 23, 1)
415#define GET_RX_DESC_SHIFT(__pdesc) \ 439#define GET_RX_DESC_SHIFT(__pdesc) \
416 LE_BITS_TO_4BYTE(__pdesc, 24, 2) 440 SHIFT_AND_MASK_LE(__pdesc, 24, 2)
417#define GET_RX_DESC_PHYST(__pdesc) \ 441#define GET_RX_DESC_PHYST(__pdesc) \
418 LE_BITS_TO_4BYTE(__pdesc, 26, 1) 442 SHIFT_AND_MASK_LE(__pdesc, 26, 1)
419#define GET_RX_DESC_SWDEC(__pdesc) \ 443#define GET_RX_DESC_SWDEC(__pdesc) \
420 LE_BITS_TO_4BYTE(__pdesc, 27, 1) 444 SHIFT_AND_MASK_LE(__pdesc, 27, 1)
421#define GET_RX_DESC_LS(__pdesc) \ 445#define GET_RX_DESC_LS(__pdesc) \
422 LE_BITS_TO_4BYTE(__pdesc, 28, 1) 446 SHIFT_AND_MASK_LE(__pdesc, 28, 1)
423#define GET_RX_DESC_FS(__pdesc) \ 447#define GET_RX_DESC_FS(__pdesc) \
424 LE_BITS_TO_4BYTE(__pdesc, 29, 1) 448 SHIFT_AND_MASK_LE(__pdesc, 29, 1)
425#define GET_RX_DESC_EOR(__pdesc) \ 449#define GET_RX_DESC_EOR(__pdesc) \
426 LE_BITS_TO_4BYTE(__pdesc, 30, 1) 450 SHIFT_AND_MASK_LE(__pdesc, 30, 1)
427#define GET_RX_DESC_OWN(__pdesc) \ 451#define GET_RX_DESC_OWN(__pdesc) \
428 LE_BITS_TO_4BYTE(__pdesc, 31, 1) 452 SHIFT_AND_MASK_LE(__pdesc, 31, 1)
429 453
430#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ 454#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
431 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) 455 SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
432#define SET_RX_DESC_EOR(__pdesc, __val) \ 456#define SET_RX_DESC_EOR(__pdesc, __val) \
433 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) 457 SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
434#define SET_RX_DESC_OWN(__pdesc, __val) \ 458#define SET_RX_DESC_OWN(__pdesc, __val) \
435 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) 459 SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
436 460
437#define GET_RX_DESC_MACID(__pdesc) \ 461#define GET_RX_DESC_MACID(__pdesc) \
438 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) 462 SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
439#define GET_RX_DESC_TID(__pdesc) \ 463#define GET_RX_DESC_TID(__pdesc) \
440 LE_BITS_TO_4BYTE(__pdesc+4, 5, 4) 464 SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
441#define GET_RX_DESC_HWRSVD(__pdesc) \ 465#define GET_RX_DESC_HWRSVD(__pdesc) \
442 LE_BITS_TO_4BYTE(__pdesc+4, 9, 5) 466 SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
443#define GET_RX_DESC_PAGGR(__pdesc) \ 467#define GET_RX_DESC_PAGGR(__pdesc) \
444 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) 468 SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
445#define GET_RX_DESC_FAGGR(__pdesc) \ 469#define GET_RX_DESC_FAGGR(__pdesc) \
446 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) 470 SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
447#define GET_RX_DESC_A1_FIT(__pdesc) \ 471#define GET_RX_DESC_A1_FIT(__pdesc) \
448 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) 472 SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
449#define GET_RX_DESC_A2_FIT(__pdesc) \ 473#define GET_RX_DESC_A2_FIT(__pdesc) \
450 LE_BITS_TO_4BYTE(__pdesc+4, 20, 4) 474 SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
451#define GET_RX_DESC_PAM(__pdesc) \ 475#define GET_RX_DESC_PAM(__pdesc) \
452 LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) 476 SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
453#define GET_RX_DESC_PWR(__pdesc) \ 477#define GET_RX_DESC_PWR(__pdesc) \
454 LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) 478 SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
455#define GET_RX_DESC_MD(__pdesc) \ 479#define GET_RX_DESC_MD(__pdesc) \
456 LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) 480 SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
457#define GET_RX_DESC_MF(__pdesc) \ 481#define GET_RX_DESC_MF(__pdesc) \
458 LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) 482 SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
459#define GET_RX_DESC_TYPE(__pdesc) \ 483#define GET_RX_DESC_TYPE(__pdesc) \
460 LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) 484 SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
461#define GET_RX_DESC_MC(__pdesc) \ 485#define GET_RX_DESC_MC(__pdesc) \
462 LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) 486 SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
463#define GET_RX_DESC_BC(__pdesc) \ 487#define GET_RX_DESC_BC(__pdesc) \
464 LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) 488 SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
465#define GET_RX_DESC_SEQ(__pdesc) \ 489#define GET_RX_DESC_SEQ(__pdesc) \
466 LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) 490 SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
467#define GET_RX_DESC_FRAG(__pdesc) \ 491#define GET_RX_DESC_FRAG(__pdesc) \
468 LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) 492 SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
469#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \ 493#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
470 LE_BITS_TO_4BYTE(__pdesc+8, 16, 14) 494 SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
471#define GET_RX_DESC_NEXT_IND(__pdesc) \ 495#define GET_RX_DESC_NEXT_IND(__pdesc) \
472 LE_BITS_TO_4BYTE(__pdesc+8, 30, 1) 496 SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
473#define GET_RX_DESC_RSVD(__pdesc) \ 497#define GET_RX_DESC_RSVD(__pdesc) \
474 LE_BITS_TO_4BYTE(__pdesc+8, 31, 1) 498 SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
475 499
476#define GET_RX_DESC_RXMCS(__pdesc) \ 500#define GET_RX_DESC_RXMCS(__pdesc) \
477 LE_BITS_TO_4BYTE(__pdesc+12, 0, 6) 501 SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
478#define GET_RX_DESC_RXHT(__pdesc) \ 502#define GET_RX_DESC_RXHT(__pdesc) \
479 LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) 503 SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
480#define GET_RX_DESC_SPLCP(__pdesc) \ 504#define GET_RX_DESC_SPLCP(__pdesc) \
481 LE_BITS_TO_4BYTE(__pdesc+12, 8, 1) 505 SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
482#define GET_RX_DESC_BW(__pdesc) \ 506#define GET_RX_DESC_BW(__pdesc) \
483 LE_BITS_TO_4BYTE(__pdesc+12, 9, 1) 507 SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
484#define GET_RX_DESC_HTC(__pdesc) \ 508#define GET_RX_DESC_HTC(__pdesc) \
485 LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) 509 SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
486#define GET_RX_DESC_HWPC_ERR(__pdesc) \ 510#define GET_RX_DESC_HWPC_ERR(__pdesc) \
487 LE_BITS_TO_4BYTE(__pdesc+12, 14, 1) 511 SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
488#define GET_RX_DESC_HWPC_IND(__pdesc) \ 512#define GET_RX_DESC_HWPC_IND(__pdesc) \
489 LE_BITS_TO_4BYTE(__pdesc+12, 15, 1) 513 SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
490#define GET_RX_DESC_IV0(__pdesc) \ 514#define GET_RX_DESC_IV0(__pdesc) \
491 LE_BITS_TO_4BYTE(__pdesc+12, 16, 16) 515 SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
492 516
493#define GET_RX_DESC_IV1(__pdesc) \ 517#define GET_RX_DESC_IV1(__pdesc) \
494 LE_BITS_TO_4BYTE(__pdesc+16, 0, 32) 518 SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
495#define GET_RX_DESC_TSFL(__pdesc) \ 519#define GET_RX_DESC_TSFL(__pdesc) \
496 LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) 520 SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
497 521
498#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ 522#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
499 LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) 523 SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
500#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ 524#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
501 LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) 525 SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
502 526
503#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ 527#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
504 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) 528 SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
505#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ 529#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
506 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) 530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
507 531
508#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
509do { \ 533do { \
@@ -711,4 +735,6 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue);
711void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, 735void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
712 bool b_firstseg, bool b_lastseg, 736 bool b_firstseg, bool b_lastseg,
713 struct sk_buff *skb); 737 struct sk_buff *skb);
738bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
739
714#endif 740#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
new file mode 100644
index 000000000000..ad2de6b839ef
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
@@ -0,0 +1,14 @@
1rtl8192cu-objs := \
2 dm.o \
3 hw.o \
4 led.o \
5 mac.o \
6 phy.o \
7 rf.o \
8 sw.o \
9 table.o \
10 trx.o
11
12obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
13
14ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
new file mode 100644
index 000000000000..c54940ea72fe
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -0,0 +1,62 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/def.h"
31
32/*-------------------------------------------------------------------------
33 * Chip specific
34 *-------------------------------------------------------------------------*/
35#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */
36#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */
37#define NORMAL_CHIP BIT(4)
38#define CHIP_VENDOR_UMC BIT(5)
39#define CHIP_VENDOR_UMC_B_CUT BIT(6)
40
41#define IS_NORMAL_CHIP(version) \
42 (((version) & NORMAL_CHIP) ? true : false)
43
44#define IS_8723_SERIES(version) \
45 (((version) & CHIP_8723) ? true : false)
46
47#define IS_92C_1T2R(version) \
48 (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
49
50#define IS_VENDOR_UMC(version) \
51 (((version) & CHIP_VENDOR_UMC) ? true : false)
52
53#define IS_VENDOR_UMC_A_CUT(version) \
54 (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6) | BIT(7))) ? \
55 false : true) : false)
56
57#define IS_VENDOR_8723_A_CUT(version) \
58 (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
59 false : true) : false)
60
61#define CHIP_BONDING_92C_1T2R 0x1
62#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
new file mode 100644
index 000000000000..f311baee668d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -0,0 +1,113 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../base.h"
32#include "reg.h"
33#include "def.h"
34#include "phy.h"
35#include "dm.h"
36
37void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
38{
39 struct rtl_priv *rtlpriv = rtl_priv(hw);
40 struct rtl_phy *rtlphy = &(rtlpriv->phy);
41 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
42 long undecorated_smoothed_pwdb;
43
44 if (!rtlpriv->dm.dynamic_txpower_enable)
45 return;
46
47 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
48 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
49 return;
50 }
51
52 if ((mac->link_state < MAC80211_LINKED) &&
53 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
54 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
55 ("Not connected to any\n"));
56
57 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
58
59 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
60 return;
61 }
62
63 if (mac->link_state >= MAC80211_LINKED) {
64 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
65 undecorated_smoothed_pwdb =
66 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
67 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
68 ("AP Client PWDB = 0x%lx\n",
69 undecorated_smoothed_pwdb));
70 } else {
71 undecorated_smoothed_pwdb =
72 rtlpriv->dm.undecorated_smoothed_pwdb;
73 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
74 ("STA Default Port PWDB = 0x%lx\n",
75 undecorated_smoothed_pwdb));
76 }
77 } else {
78 undecorated_smoothed_pwdb =
79 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
80
81 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
82 ("AP Ext Port PWDB = 0x%lx\n",
83 undecorated_smoothed_pwdb));
84 }
85
86 if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
87 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
88 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
89 ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
90 } else if ((undecorated_smoothed_pwdb <
91 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
92 (undecorated_smoothed_pwdb >=
93 TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
94
95 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
96 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
97 ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
98 } else if (undecorated_smoothed_pwdb <
99 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
100 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
101 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
102 ("TXHIGHPWRLEVEL_NORMAL\n"));
103 }
104
105 if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
106 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
107 ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
108 rtlphy->current_channel));
109 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
110 }
111
112 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
113}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
new file mode 100644
index 000000000000..7f966c666b5a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -0,0 +1,32 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/dm.h"
31
32void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
new file mode 100644
index 000000000000..9444e76838cf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -0,0 +1,2504 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../efuse.h"
32#include "../base.h"
33#include "../cam.h"
34#include "../ps.h"
35#include "../usb.h"
36#include "reg.h"
37#include "def.h"
38#include "phy.h"
39#include "mac.h"
40#include "dm.h"
41#include "hw.h"
42#include "trx.h"
43#include "led.h"
44#include "table.h"
45
46static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
47{
48 struct rtl_priv *rtlpriv = rtl_priv(hw);
49 struct rtl_phy *rtlphy = &(rtlpriv->phy);
50 struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
51
52 rtlphy->hwparam_tables[MAC_REG].length = RTL8192CUMAC_2T_ARRAYLENGTH;
53 rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY;
54 if (IS_HIGHT_PA(rtlefuse->board_type)) {
55 rtlphy->hwparam_tables[PHY_REG_PG].length =
56 RTL8192CUPHY_REG_Array_PG_HPLength;
57 rtlphy->hwparam_tables[PHY_REG_PG].pdata =
58 RTL8192CUPHY_REG_Array_PG_HP;
59 } else {
60 rtlphy->hwparam_tables[PHY_REG_PG].length =
61 RTL8192CUPHY_REG_ARRAY_PGLENGTH;
62 rtlphy->hwparam_tables[PHY_REG_PG].pdata =
63 RTL8192CUPHY_REG_ARRAY_PG;
64 }
65 /* 2T */
66 rtlphy->hwparam_tables[PHY_REG_2T].length =
67 RTL8192CUPHY_REG_2TARRAY_LENGTH;
68 rtlphy->hwparam_tables[PHY_REG_2T].pdata =
69 RTL8192CUPHY_REG_2TARRAY;
70 rtlphy->hwparam_tables[RADIOA_2T].length =
71 RTL8192CURADIOA_2TARRAYLENGTH;
72 rtlphy->hwparam_tables[RADIOA_2T].pdata =
73 RTL8192CURADIOA_2TARRAY;
74 rtlphy->hwparam_tables[RADIOB_2T].length =
75 RTL8192CURADIOB_2TARRAYLENGTH;
76 rtlphy->hwparam_tables[RADIOB_2T].pdata =
77 RTL8192CU_RADIOB_2TARRAY;
78 rtlphy->hwparam_tables[AGCTAB_2T].length =
79 RTL8192CUAGCTAB_2TARRAYLENGTH;
80 rtlphy->hwparam_tables[AGCTAB_2T].pdata =
81 RTL8192CUAGCTAB_2TARRAY;
82 /* 1T */
83 if (IS_HIGHT_PA(rtlefuse->board_type)) {
84 rtlphy->hwparam_tables[PHY_REG_1T].length =
85 RTL8192CUPHY_REG_1T_HPArrayLength;
86 rtlphy->hwparam_tables[PHY_REG_1T].pdata =
87 RTL8192CUPHY_REG_1T_HPArray;
88 rtlphy->hwparam_tables[RADIOA_1T].length =
89 RTL8192CURadioA_1T_HPArrayLength;
90 rtlphy->hwparam_tables[RADIOA_1T].pdata =
91 RTL8192CURadioA_1T_HPArray;
92 rtlphy->hwparam_tables[RADIOB_1T].length =
93 RTL8192CURADIOB_1TARRAYLENGTH;
94 rtlphy->hwparam_tables[RADIOB_1T].pdata =
95 RTL8192CU_RADIOB_1TARRAY;
96 rtlphy->hwparam_tables[AGCTAB_1T].length =
97 RTL8192CUAGCTAB_1T_HPArrayLength;
98 rtlphy->hwparam_tables[AGCTAB_1T].pdata =
99 Rtl8192CUAGCTAB_1T_HPArray;
100 } else {
101 rtlphy->hwparam_tables[PHY_REG_1T].length =
102 RTL8192CUPHY_REG_1TARRAY_LENGTH;
103 rtlphy->hwparam_tables[PHY_REG_1T].pdata =
104 RTL8192CUPHY_REG_1TARRAY;
105 rtlphy->hwparam_tables[RADIOA_1T].length =
106 RTL8192CURADIOA_1TARRAYLENGTH;
107 rtlphy->hwparam_tables[RADIOA_1T].pdata =
108 RTL8192CU_RADIOA_1TARRAY;
109 rtlphy->hwparam_tables[RADIOB_1T].length =
110 RTL8192CURADIOB_1TARRAYLENGTH;
111 rtlphy->hwparam_tables[RADIOB_1T].pdata =
112 RTL8192CU_RADIOB_1TARRAY;
113 rtlphy->hwparam_tables[AGCTAB_1T].length =
114 RTL8192CUAGCTAB_1TARRAYLENGTH;
115 rtlphy->hwparam_tables[AGCTAB_1T].pdata =
116 RTL8192CUAGCTAB_1TARRAY;
117 }
118}
119
120static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
121 bool autoload_fail,
122 u8 *hwinfo)
123{
124 struct rtl_priv *rtlpriv = rtl_priv(hw);
125 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
126 u8 rf_path, index, tempval;
127 u16 i;
128
129 for (rf_path = 0; rf_path < 2; rf_path++) {
130 for (i = 0; i < 3; i++) {
131 if (!autoload_fail) {
132 rtlefuse->
133 eeprom_chnlarea_txpwr_cck[rf_path][i] =
134 hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
135 rtlefuse->
136 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
137 hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 +
138 i];
139 } else {
140 rtlefuse->
141 eeprom_chnlarea_txpwr_cck[rf_path][i] =
142 EEPROM_DEFAULT_TXPOWERLEVEL;
143 rtlefuse->
144 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
145 EEPROM_DEFAULT_TXPOWERLEVEL;
146 }
147 }
148 }
149 for (i = 0; i < 3; i++) {
150 if (!autoload_fail)
151 tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
152 else
153 tempval = EEPROM_DEFAULT_HT40_2SDIFF;
154 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
155 (tempval & 0xf);
156 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
157 ((tempval & 0xf0) >> 4);
158 }
159 for (rf_path = 0; rf_path < 2; rf_path++)
160 for (i = 0; i < 3; i++)
161 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
162 ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
163 i, rtlefuse->
164 eeprom_chnlarea_txpwr_cck[rf_path][i]));
165 for (rf_path = 0; rf_path < 2; rf_path++)
166 for (i = 0; i < 3; i++)
167 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
168 ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
169 rf_path, i,
170 rtlefuse->
171 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
172 for (rf_path = 0; rf_path < 2; rf_path++)
173 for (i = 0; i < 3; i++)
174 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
175 ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
176 rf_path, i,
177 rtlefuse->
178 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
179 [i]));
180 for (rf_path = 0; rf_path < 2; rf_path++) {
181 for (i = 0; i < 14; i++) {
182 index = _rtl92c_get_chnl_group((u8) i);
183 rtlefuse->txpwrlevel_cck[rf_path][i] =
184 rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index];
185 rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
186 rtlefuse->
187 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
188 if ((rtlefuse->
189 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
190 rtlefuse->
191 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
192 > 0) {
193 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
194 rtlefuse->
195 eeprom_chnlarea_txpwr_ht40_1s[rf_path]
196 [index] - rtlefuse->
197 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
198 [index];
199 } else {
200 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
201 }
202 }
203 for (i = 0; i < 14; i++) {
204 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
205 ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
206 "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
207 rtlefuse->txpwrlevel_cck[rf_path][i],
208 rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
209 rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
210 }
211 }
212 for (i = 0; i < 3; i++) {
213 if (!autoload_fail) {
214 rtlefuse->eeprom_pwrlimit_ht40[i] =
215 hwinfo[EEPROM_TXPWR_GROUP + i];
216 rtlefuse->eeprom_pwrlimit_ht20[i] =
217 hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
218 } else {
219 rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
220 rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
221 }
222 }
223 for (rf_path = 0; rf_path < 2; rf_path++) {
224 for (i = 0; i < 14; i++) {
225 index = _rtl92c_get_chnl_group((u8) i);
226 if (rf_path == RF90_PATH_A) {
227 rtlefuse->pwrgroup_ht20[rf_path][i] =
228 (rtlefuse->eeprom_pwrlimit_ht20[index]
229 & 0xf);
230 rtlefuse->pwrgroup_ht40[rf_path][i] =
231 (rtlefuse->eeprom_pwrlimit_ht40[index]
232 & 0xf);
233 } else if (rf_path == RF90_PATH_B) {
234 rtlefuse->pwrgroup_ht20[rf_path][i] =
235 ((rtlefuse->eeprom_pwrlimit_ht20[index]
236 & 0xf0) >> 4);
237 rtlefuse->pwrgroup_ht40[rf_path][i] =
238 ((rtlefuse->eeprom_pwrlimit_ht40[index]
239 & 0xf0) >> 4);
240 }
241 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
242 ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
243 rf_path, i,
244 rtlefuse->pwrgroup_ht20[rf_path][i]));
245 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
246 ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
247 rf_path, i,
248 rtlefuse->pwrgroup_ht40[rf_path][i]));
249 }
250 }
251 for (i = 0; i < 14; i++) {
252 index = _rtl92c_get_chnl_group((u8) i);
253 if (!autoload_fail)
254 tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
255 else
256 tempval = EEPROM_DEFAULT_HT20_DIFF;
257 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
258 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
259 ((tempval >> 4) & 0xF);
260 if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
261 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
262 if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
263 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
264 index = _rtl92c_get_chnl_group((u8) i);
265 if (!autoload_fail)
266 tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
267 else
268 tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
269 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
270 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
271 ((tempval >> 4) & 0xF);
272 }
273 rtlefuse->legacy_ht_txpowerdiff =
274 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
275 for (i = 0; i < 14; i++)
276 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
277 ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
278 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
279 for (i = 0; i < 14; i++)
280 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
281 ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
282 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
283 for (i = 0; i < 14; i++)
284 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
285 ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
286 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
287 for (i = 0; i < 14; i++)
288 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
289 ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
290 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
291 if (!autoload_fail)
292 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
293 else
294 rtlefuse->eeprom_regulatory = 0;
295 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
296 ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
297 if (!autoload_fail) {
298 rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
299 rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
300 } else {
301 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
302 rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
303 }
304 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
305 ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
306 rtlefuse->eeprom_tssi[RF90_PATH_A],
307 rtlefuse->eeprom_tssi[RF90_PATH_B]));
308 if (!autoload_fail)
309 tempval = hwinfo[EEPROM_THERMAL_METER];
310 else
311 tempval = EEPROM_DEFAULT_THERMALMETER;
312 rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
313 if (rtlefuse->eeprom_thermalmeter < 0x06 ||
314 rtlefuse->eeprom_thermalmeter > 0x1c)
315 rtlefuse->eeprom_thermalmeter = 0x12;
316 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
317 rtlefuse->apk_thermalmeterignore = true;
318 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
319 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
320 ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
321}
322
/*
 * _rtl92cu_read_board_type - decode the board type from the efuse map and
 * flag boards that use an external power amplifier.
 * @hw:       driver hardware context
 * @contents: efuse/EEPROM shadow map
 *
 * Normal chips keep the board type in EEPROM_RF_OPT1 bits [7:5]; test
 * chips keep it in EEPROM_RF_OPT4 under BOARD_TYPE_TEST_MASK.
 */
static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
{
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 boardType;

	if (IS_NORMAL_CHIP(rtlhal->version)) {
		boardType = ((contents[EEPROM_RF_OPT1]) &
			    BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/
	} else {
		boardType = contents[EEPROM_RF_OPT4];
		boardType &= BOARD_TYPE_TEST_MASK;
	}
	rtlefuse->board_type = boardType;
	/* High-PA board types carry an external power amplifier. */
	if (IS_HIGHT_PA(rtlefuse->board_type))
		rtlefuse->external_pa = 1;
	printk(KERN_INFO "rtl8192cu: Board Type %x\n", rtlefuse->board_type);

#ifdef CONFIG_ANTENNA_DIVERSITY
	/* Antenna Diversity setting. */
	/* NOTE(review): this block references an undeclared registry_par
	 * and uses rtl_efuse (the accessor macro) where the local rtlefuse
	 * was presumably intended; it cannot compile if
	 * CONFIG_ANTENNA_DIVERSITY is ever enabled -- unported vendor code,
	 * confirm before enabling. */
	if (registry_par->antdiv_cfg == 2) /* 2: From Efuse */
		rtl_efuse->antenna_cfg = (contents[EEPROM_RF_OPT1]&0x18)>>3;
	else
		rtl_efuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */

	printk(KERN_INFO "rtl8192cu: Antenna Config %x\n",
	       rtl_efuse->antenna_cfg);
#endif
}
352
353#ifdef CONFIG_BT_COEXIST
354static void _update_bt_param(_adapter *padapter)
355{
356 struct btcoexist_priv *pbtpriv = &(padapter->halpriv.bt_coexist);
357 struct registry_priv *registry_par = &padapter->registrypriv;
358 if (2 != registry_par->bt_iso) {
359 /* 0:Low, 1:High, 2:From Efuse */
360 pbtpriv->BT_Ant_isolation = registry_par->bt_iso;
361 }
362 if (registry_par->bt_sco == 1) {
363 /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy,
364 * 5.OtherBusy */
365 pbtpriv->BT_Service = BT_OtherAction;
366 } else if (registry_par->bt_sco == 2) {
367 pbtpriv->BT_Service = BT_SCO;
368 } else if (registry_par->bt_sco == 4) {
369 pbtpriv->BT_Service = BT_Busy;
370 } else if (registry_par->bt_sco == 5) {
371 pbtpriv->BT_Service = BT_OtherBusy;
372 } else {
373 pbtpriv->BT_Service = BT_Idle;
374 }
375 pbtpriv->BT_Ampdu = registry_par->bt_ampdu;
376 pbtpriv->bCOBT = _TRUE;
377 pbtpriv->BtEdcaUL = 0;
378 pbtpriv->BtEdcaDL = 0;
379 pbtpriv->BtRssiState = 0xff;
380 pbtpriv->bInitSet = _FALSE;
381 pbtpriv->bBTBusyTraffic = _FALSE;
382 pbtpriv->bBTTrafficModeSet = _FALSE;
383 pbtpriv->bBTNonTrafficModeSet = _FALSE;
384 pbtpriv->CurrentState = 0;
385 pbtpriv->PreviousState = 0;
386 printk(KERN_INFO "rtl8192cu: BT Coexistance = %s\n",
387 (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable");
388 if (pbtpriv->BT_Coexist) {
389 if (pbtpriv->BT_Ant_Num == Ant_x2)
390 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
391 "Ant_Num = Antx2\n");
392 else if (pbtpriv->BT_Ant_Num == Ant_x1)
393 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
394 "Ant_Num = Antx1\n");
395 switch (pbtpriv->BT_CoexistType) {
396 case BT_2Wire:
397 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
398 "CoexistType = BT_2Wire\n");
399 break;
400 case BT_ISSC_3Wire:
401 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
402 "CoexistType = BT_ISSC_3Wire\n");
403 break;
404 case BT_Accel:
405 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
406 "CoexistType = BT_Accel\n");
407 break;
408 case BT_CSR_BC4:
409 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
410 "CoexistType = BT_CSR_BC4\n");
411 break;
412 case BT_CSR_BC8:
413 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
414 "CoexistType = BT_CSR_BC8\n");
415 break;
416 case BT_RTL8756:
417 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
418 "CoexistType = BT_RTL8756\n");
419 break;
420 default:
421 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
422 "CoexistType = Unknown\n");
423 break;
424 }
425 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Ant_isolation = %d\n",
426 pbtpriv->BT_Ant_isolation);
427 switch (pbtpriv->BT_Service) {
428 case BT_OtherAction:
429 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
430 "BT_OtherAction\n");
431 break;
432 case BT_SCO:
433 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
434 "BT_SCO\n");
435 break;
436 case BT_Busy:
437 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
438 "BT_Busy\n");
439 break;
440 case BT_OtherBusy:
441 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
442 "BT_OtherBusy\n");
443 break;
444 default:
445 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
446 "BT_Idle\n");
447 break;
448 }
449 printk(KERN_INFO "rtl8192cu: BT_RadioSharedType = 0x%x\n",
450 pbtpriv->BT_RadioSharedType);
451 }
452}
453
454#define GET_BT_COEXIST(priv) (&priv->bt_coexist)
455
456static void _rtl92cu_read_bluetooth_coexistInfo(struct ieee80211_hw *hw,
457 u8 *contents,
458 bool bautoloadfailed);
459{
460 HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter);
461 bool isNormal = IS_NORMAL_CHIP(pHalData->VersionID);
462 struct btcoexist_priv *pbtpriv = &pHalData->bt_coexist;
463 u8 rf_opt4;
464
465 _rtw_memset(pbtpriv, 0, sizeof(struct btcoexist_priv));
466 if (AutoloadFail) {
467 pbtpriv->BT_Coexist = _FALSE;
468 pbtpriv->BT_CoexistType = BT_2Wire;
469 pbtpriv->BT_Ant_Num = Ant_x2;
470 pbtpriv->BT_Ant_isolation = 0;
471 pbtpriv->BT_RadioSharedType = BT_Radio_Shared;
472 return;
473 }
474 if (isNormal) {
475 if (pHalData->BoardType == BOARD_USB_COMBO)
476 pbtpriv->BT_Coexist = _TRUE;
477 else
478 pbtpriv->BT_Coexist = ((PROMContent[EEPROM_RF_OPT3] &
479 0x20) >> 5); /* bit[5] */
480 rf_opt4 = PROMContent[EEPROM_RF_OPT4];
481 pbtpriv->BT_CoexistType = ((rf_opt4&0xe)>>1); /* bit [3:1] */
482 pbtpriv->BT_Ant_Num = (rf_opt4&0x1); /* bit [0] */
483 pbtpriv->BT_Ant_isolation = ((rf_opt4&0x10)>>4); /* bit [4] */
484 pbtpriv->BT_RadioSharedType = ((rf_opt4&0x20)>>5); /* bit [5] */
485 } else {
486 pbtpriv->BT_Coexist = (PROMContent[EEPROM_RF_OPT4] >> 4) ?
487 _TRUE : _FALSE;
488 }
489 _update_bt_param(Adapter);
490}
491#endif
492
493static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
494{
495 struct rtl_priv *rtlpriv = rtl_priv(hw);
496 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
497 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
498 u16 i, usvalue;
499 u8 hwinfo[HWSET_MAX_SIZE] = {0};
500 u16 eeprom_id;
501
502 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
503 rtl_efuse_shadow_map_update(hw);
504 memcpy((void *)hwinfo,
505 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
506 HWSET_MAX_SIZE);
507 } else if (rtlefuse->epromtype == EEPROM_93C46) {
508 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
509 ("RTL819X Not boot from eeprom, check it !!"));
510 }
511 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
512 hwinfo, HWSET_MAX_SIZE);
513 eeprom_id = *((u16 *)&hwinfo[0]);
514 if (eeprom_id != RTL8190_EEPROM_ID) {
515 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
516 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
517 rtlefuse->autoload_failflag = true;
518 } else {
519 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
520 rtlefuse->autoload_failflag = false;
521 }
522 if (rtlefuse->autoload_failflag == true)
523 return;
524 for (i = 0; i < 6; i += 2) {
525 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
526 *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
527 }
528 printk(KERN_INFO "rtl8192cu: MAC address: %pM\n", rtlefuse->dev_addr);
529 _rtl92cu_read_txpower_info_from_hwpg(hw,
530 rtlefuse->autoload_failflag, hwinfo);
531 rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
532 rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
533 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
534 (" VID = 0x%02x PID = 0x%02x\n",
535 rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
536 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
537 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
538 rtlefuse->txpwr_fromeprom = true;
539 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
540 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
541 ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
542 if (rtlhal->oem_id == RT_CID_DEFAULT) {
543 switch (rtlefuse->eeprom_oemid) {
544 case EEPROM_CID_DEFAULT:
545 if (rtlefuse->eeprom_did == 0x8176) {
546 if ((rtlefuse->eeprom_svid == 0x103C &&
547 rtlefuse->eeprom_smid == 0x1629))
548 rtlhal->oem_id = RT_CID_819x_HP;
549 else
550 rtlhal->oem_id = RT_CID_DEFAULT;
551 } else {
552 rtlhal->oem_id = RT_CID_DEFAULT;
553 }
554 break;
555 case EEPROM_CID_TOSHIBA:
556 rtlhal->oem_id = RT_CID_TOSHIBA;
557 break;
558 case EEPROM_CID_QMI:
559 rtlhal->oem_id = RT_CID_819x_QMI;
560 break;
561 case EEPROM_CID_WHQL:
562 default:
563 rtlhal->oem_id = RT_CID_DEFAULT;
564 break;
565 }
566 }
567 _rtl92cu_read_board_type(hw, hwinfo);
568#ifdef CONFIG_BT_COEXIST
569 _rtl92cu_read_bluetooth_coexistInfo(hw, hwinfo,
570 rtlefuse->autoload_failflag);
571#endif
572}
573
574static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
575{
576 struct rtl_priv *rtlpriv = rtl_priv(hw);
577 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
578 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
579
580 switch (rtlhal->oem_id) {
581 case RT_CID_819x_HP:
582 usb_priv->ledctl.led_opendrain = true;
583 break;
584 case RT_CID_819x_Lenovo:
585 case RT_CID_DEFAULT:
586 case RT_CID_TOSHIBA:
587 case RT_CID_CCX:
588 case RT_CID_819x_Acer:
589 case RT_CID_WHQL:
590 default:
591 break;
592 }
593 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
594 ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
595}
596
597void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
598{
599
600 struct rtl_priv *rtlpriv = rtl_priv(hw);
601 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
602 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
603 u8 tmp_u1b;
604
605 if (!IS_NORMAL_CHIP(rtlhal->version))
606 return;
607 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
608 rtlefuse->epromtype = (tmp_u1b & EEPROMSEL) ?
609 EEPROM_93C46 : EEPROM_BOOT_EFUSE;
610 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n",
611 (tmp_u1b & EEPROMSEL) ? "EERROM" : "EFUSE"));
612 rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
613 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n",
614 (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!"));
615 _rtl92cu_read_adapter_info(hw);
616 _rtl92cu_hal_customized_behavior(hw);
617 return;
618}
619
/*
 * _rtl92cu_init_power_on - hardware power-on sequence: wait for efuse
 * autoload, unlock the power-control registers, enable the SPS/LDO,
 * auto-enable the MAC, release RF isolation and turn on the MAC
 * DMA/WMAC/schedule/security blocks.
 *
 * Returns 0 on success, -ENODEV if either polling loop times out.
 * The register write order follows the vendor power-on procedure and
 * must not be rearranged.
 */
static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int status = 0;
	u16 value16;
	u8 value8;
	/* polling autoload done. */
	u32 pollingCount = 0;

	/* Wait for the efuse autoload to complete before touching the
	 * power-control registers. */
	do {
		if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
				 ("Autoload Done!\n"));
			break;
		}
		if (pollingCount++ > 100) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
				 ("Failed to polling REG_APS_FSMCO[PFM_ALDN]"
				 " done!\n"));
			return -ENODEV;
		}
	} while (true);
	/* 0. RSV_CTRL 0x1C[7:0] = 0 unlock ISO/CLK/Power control register */
	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
	/* Power on when re-enter from IPS/Radio off/card disable */
	/* enable SPS into PWM mode */
	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
	udelay(100);
	/* Turn on the 1.2V LDO if it is off, then lift MD->PP isolation. */
	value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
	if (0 == (value8 & LDV12_EN)) {
		value8 |= LDV12_EN;
		rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 (" power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x.\n",
			 value8));
		udelay(100);
		value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
		value8 &= ~ISO_MD2PP;
		rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8);
	}
	/* auto enable WLAN */
	/* Set APFM_ONMAC and wait for the hardware to clear it. */
	pollingCount = 0;
	value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO);
	value16 |= APFM_ONMAC;
	rtl_write_word(rtlpriv, REG_APS_FSMCO, value16);
	do {
		if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) {
			printk(KERN_INFO "rtl8192cu: MAC auto ON okay!\n");
			break;
		}
		if (pollingCount++ > 100) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
				 ("Failed to polling REG_APS_FSMCO[APFM_ONMAC]"
				 " done!\n"));
			return -ENODEV;
		}
	} while (true);
	/* Enable Radio ,GPIO ,and LED function */
	rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x0812);
	/* release RF digital isolation */
	value16 = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
	value16 &= ~ISO_DIOR;
	rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16);
	/* Reconsider when to do this operation after asking HWSD. */
	/* Clear APSD_CTRL bit 6 and wait (bounded) for bit 7 to drop. */
	pollingCount = 0;
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv,
			REG_APSD_CTRL) & ~BIT(6)));
	do {
		pollingCount++;
	} while ((pollingCount < 200) &&
		 (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7)));
	/* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
	value16 = rtl_read_word(rtlpriv, REG_CR);
	value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
		    PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC);
	rtl_write_word(rtlpriv, REG_CR, value16);
	return status;
}
698
699static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
700 bool wmm_enable,
701 u8 out_ep_num,
702 u8 queue_sel)
703{
704 struct rtl_priv *rtlpriv = rtl_priv(hw);
705 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
706 bool isChipN = IS_NORMAL_CHIP(rtlhal->version);
707 u32 outEPNum = (u32)out_ep_num;
708 u32 numHQ = 0;
709 u32 numLQ = 0;
710 u32 numNQ = 0;
711 u32 numPubQ;
712 u32 value32;
713 u8 value8;
714 u32 txQPageNum, txQPageUnit, txQRemainPage;
715
716 if (!wmm_enable) {
717 numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ :
718 CHIP_A_PAGE_NUM_PUBQ;
719 txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ;
720
721 txQPageUnit = txQPageNum/outEPNum;
722 txQRemainPage = txQPageNum % outEPNum;
723 if (queue_sel & TX_SELE_HQ)
724 numHQ = txQPageUnit;
725 if (queue_sel & TX_SELE_LQ)
726 numLQ = txQPageUnit;
727 /* HIGH priority queue always present in the configuration of
728 * 2 out-ep. Remainder pages have assigned to High queue */
729 if ((outEPNum > 1) && (txQRemainPage))
730 numHQ += txQRemainPage;
731 /* NOTE: This step done before writting REG_RQPN. */
732 if (isChipN) {
733 if (queue_sel & TX_SELE_NQ)
734 numNQ = txQPageUnit;
735 value8 = (u8)_NPQ(numNQ);
736 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
737 }
738 } else {
739 /* for WMM ,number of out-ep must more than or equal to 2! */
740 numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ :
741 WMM_CHIP_A_PAGE_NUM_PUBQ;
742 if (queue_sel & TX_SELE_HQ) {
743 numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ :
744 WMM_CHIP_A_PAGE_NUM_HPQ;
745 }
746 if (queue_sel & TX_SELE_LQ) {
747 numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ :
748 WMM_CHIP_A_PAGE_NUM_LPQ;
749 }
750 /* NOTE: This step done before writting REG_RQPN. */
751 if (isChipN) {
752 if (queue_sel & TX_SELE_NQ)
753 numNQ = WMM_CHIP_B_PAGE_NUM_NPQ;
754 value8 = (u8)_NPQ(numNQ);
755 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
756 }
757 }
758 /* TX DMA */
759 value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
760 rtl_write_dword(rtlpriv, REG_RQPN, value32);
761}
762
763static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
764{
765 struct rtl_priv *rtlpriv = rtl_priv(hw);
766 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
767 u8 txpktbuf_bndy;
768 u8 value8;
769
770 if (!wmm_enable)
771 txpktbuf_bndy = TX_PAGE_BOUNDARY;
772 else /* for WMM */
773 txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version))
774 ? WMM_CHIP_B_TX_PAGE_BOUNDARY
775 : WMM_CHIP_A_TX_PAGE_BOUNDARY;
776 rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
777 rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
778 rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
779 rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
780 rtl_write_byte(rtlpriv, REG_TDECTRL+1, txpktbuf_bndy);
781 rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
782 value8 = _PSRX(RX_PAGE_SIZE_REG_VALUE) | _PSTX(PBP_128);
783 rtl_write_byte(rtlpriv, REG_PBP, value8);
784}
785
786static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ,
787 u16 bkQ, u16 viQ, u16 voQ,
788 u16 mgtQ, u16 hiQ)
789{
790 struct rtl_priv *rtlpriv = rtl_priv(hw);
791 u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7);
792
793 value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
794 _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
795 _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
796 rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16);
797}
798
799static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
800 bool wmm_enable,
801 u8 queue_sel)
802{
803 u16 uninitialized_var(value);
804
805 switch (queue_sel) {
806 case TX_SELE_HQ:
807 value = QUEUE_HIGH;
808 break;
809 case TX_SELE_LQ:
810 value = QUEUE_LOW;
811 break;
812 case TX_SELE_NQ:
813 value = QUEUE_NORMAL;
814 break;
815 default:
816 WARN_ON(1); /* Shall not reach here! */
817 break;
818 }
819 _rtl92c_init_chipN_reg_priority(hw, value, value, value, value,
820 value, value);
821 printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
822}
823
824static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
825 bool wmm_enable,
826 u8 queue_sel)
827{
828 u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
829 u16 uninitialized_var(valueHi);
830 u16 uninitialized_var(valueLow);
831
832 switch (queue_sel) {
833 case (TX_SELE_HQ | TX_SELE_LQ):
834 valueHi = QUEUE_HIGH;
835 valueLow = QUEUE_LOW;
836 break;
837 case (TX_SELE_NQ | TX_SELE_LQ):
838 valueHi = QUEUE_NORMAL;
839 valueLow = QUEUE_LOW;
840 break;
841 case (TX_SELE_HQ | TX_SELE_NQ):
842 valueHi = QUEUE_HIGH;
843 valueLow = QUEUE_NORMAL;
844 break;
845 default:
846 WARN_ON(1);
847 break;
848 }
849 if (!wmm_enable) {
850 beQ = valueLow;
851 bkQ = valueLow;
852 viQ = valueHi;
853 voQ = valueHi;
854 mgtQ = valueHi;
855 hiQ = valueHi;
856 } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
857 beQ = valueHi;
858 bkQ = valueLow;
859 viQ = valueLow;
860 voQ = valueHi;
861 mgtQ = valueHi;
862 hiQ = valueHi;
863 }
864 _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
865 printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
866}
867
868static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
869 bool wmm_enable,
870 u8 queue_sel)
871{
872 u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
873 struct rtl_priv *rtlpriv = rtl_priv(hw);
874
875 if (!wmm_enable) { /* typical setting */
876 beQ = QUEUE_LOW;
877 bkQ = QUEUE_LOW;
878 viQ = QUEUE_NORMAL;
879 voQ = QUEUE_HIGH;
880 mgtQ = QUEUE_HIGH;
881 hiQ = QUEUE_HIGH;
882 } else { /* for WMM */
883 beQ = QUEUE_LOW;
884 bkQ = QUEUE_NORMAL;
885 viQ = QUEUE_NORMAL;
886 voQ = QUEUE_HIGH;
887 mgtQ = QUEUE_HIGH;
888 hiQ = QUEUE_HIGH;
889 }
890 _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
891 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
892 ("Tx queue select :0x%02x..\n", queue_sel));
893}
894
895static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
896 bool wmm_enable,
897 u8 out_ep_num,
898 u8 queue_sel)
899{
900 switch (out_ep_num) {
901 case 1:
902 _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable,
903 queue_sel);
904 break;
905 case 2:
906 _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable,
907 queue_sel);
908 break;
909 case 3:
910 _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable,
911 queue_sel);
912 break;
913 default:
914 WARN_ON(1); /* Shall not reach here! */
915 break;
916 }
917}
918
919static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
920 bool wmm_enable,
921 u8 out_ep_num,
922 u8 queue_sel)
923{
924 u8 hq_sele;
925 struct rtl_priv *rtlpriv = rtl_priv(hw);
926
927 switch (out_ep_num) {
928 case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */
929 if (!wmm_enable) /* typical setting */
930 hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ |
931 HQSEL_HIQ;
932 else /* for WMM */
933 hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ |
934 HQSEL_HIQ;
935 break;
936 case 1:
937 if (TX_SELE_LQ == queue_sel) {
938 /* map all endpoint to Low queue */
939 hq_sele = 0;
940 } else if (TX_SELE_HQ == queue_sel) {
941 /* map all endpoint to High queue */
942 hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_BEQ |
943 HQSEL_BKQ | HQSEL_MGTQ | HQSEL_HIQ;
944 }
945 break;
946 default:
947 WARN_ON(1); /* Shall not reach here! */
948 break;
949 }
950 rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
951 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
952 ("Tx queue select :0x%02x..\n", hq_sele));
953}
954
955static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
956 bool wmm_enable,
957 u8 out_ep_num,
958 u8 queue_sel)
959{
960 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
961 if (IS_NORMAL_CHIP(rtlhal->version))
962 _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num,
963 queue_sel);
964 else
965 _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num,
966 queue_sel);
967}
968
/*
 * _rtl92cu_init_usb_aggregation - no-op stub: USB aggregation setup is not
 * implemented here (hardware defaults remain in effect).
 */
static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
{
}
972
973static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
974{
975 u16 value16;
976
977 struct rtl_priv *rtlpriv = rtl_priv(hw);
978 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
979
980 mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APP_FCS |
981 RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
982 RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
983 rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
984 /* Accept all multicast address */
985 rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF);
986 rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF);
987 /* Accept all management frames */
988 value16 = 0xFFFF;
989 rtl92c_set_mgt_filter(hw, value16);
990 /* Reject all control frame - default value is 0 */
991 rtl92c_set_ctrl_filter(hw, 0x0);
992 /* Accept all data frames */
993 value16 = 0xFFFF;
994 rtl92c_set_data_filter(hw, value16);
995}
996
997static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
998{
999 struct rtl_priv *rtlpriv = rtl_priv(hw);
1000 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1001 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
1002 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
1003 int err = 0;
1004 u32 boundary = 0;
1005 u8 wmm_enable = false; /* TODO */
1006 u8 out_ep_nums = rtlusb->out_ep_nums;
1007 u8 queue_sel = rtlusb->out_queue_sel;
1008 err = _rtl92cu_init_power_on(hw);
1009
1010 if (err) {
1011 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1012 ("Failed to init power on!\n"));
1013 return err;
1014 }
1015 if (!wmm_enable) {
1016 boundary = TX_PAGE_BOUNDARY;
1017 } else { /* for WMM */
1018 boundary = (IS_NORMAL_CHIP(rtlhal->version))
1019 ? WMM_CHIP_B_TX_PAGE_BOUNDARY
1020 : WMM_CHIP_A_TX_PAGE_BOUNDARY;
1021 }
1022 if (false == rtl92c_init_llt_table(hw, boundary)) {
1023 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1024 ("Failed to init LLT Table!\n"));
1025 return -EINVAL;
1026 }
1027 _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
1028 queue_sel);
1029 _rtl92c_init_trx_buffer(hw, wmm_enable);
1030 _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums,
1031 queue_sel);
1032 /* Get Rx PHY status in order to report RSSI and others. */
1033 rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE);
1034 rtl92c_init_interrupt(hw);
1035 rtl92c_init_network_type(hw);
1036 _rtl92cu_init_wmac_setting(hw);
1037 rtl92c_init_adaptive_ctrl(hw);
1038 rtl92c_init_edca(hw);
1039 rtl92c_init_rate_fallback(hw);
1040 rtl92c_init_retry_function(hw);
1041 _rtl92cu_init_usb_aggregation(hw);
1042 rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20);
1043 rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
1044 rtl92c_init_beacon_parameters(hw, rtlhal->version);
1045 rtl92c_init_ampdu_aggregation(hw);
1046 rtl92c_init_beacon_max_error(hw, true);
1047 return err;
1048}
1049
1050void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
1051{
1052 struct rtl_priv *rtlpriv = rtl_priv(hw);
1053 u8 sec_reg_value = 0x0;
1054 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1055
1056 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1057 ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
1058 rtlpriv->sec.pairwise_enc_algorithm,
1059 rtlpriv->sec.group_enc_algorithm));
1060 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
1061 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
1062 ("not open sw encryption\n"));
1063 return;
1064 }
1065 sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
1066 if (rtlpriv->sec.use_defaultkey) {
1067 sec_reg_value |= SCR_TxUseDK;
1068 sec_reg_value |= SCR_RxUseDK;
1069 }
1070 if (IS_NORMAL_CHIP(rtlhal->version))
1071 sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
1072 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
1073 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
1074 ("The SECR-value %x\n", sec_reg_value));
1075 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
1076}
1077
/*
 * _rtl92cu_hw_configure - assorted one-off MAC register fixups applied
 * after the main init sequence (loopback fix, HW sequence numbering,
 * USB interference workaround, beacon control).
 */
static void _rtl92cu_hw_configure(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	/* To Fix MAC loopback mode fail. */
	rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
	rtl_write_byte(rtlpriv, 0x15, 0xe9);
	/* HW SEQ CTRL */
	/* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
	rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
	/* fixed USB interface interference issue */
	rtl_write_byte(rtlpriv, 0xfe40, 0xe0);
	rtl_write_byte(rtlpriv, 0xfe41, 0x8d);
	rtl_write_byte(rtlpriv, 0xfe42, 0x80);
	/* Cache the BCN_CTRL value written -- presumably so later code can
	 * reapply it; confirm against the users of reg_bcn_ctrl_val. */
	rtlusb->reg_bcn_ctrl_val = 0x18;
	rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
}
1096
1097static void _InitPABias(struct ieee80211_hw *hw)
1098{
1099 struct rtl_priv *rtlpriv = rtl_priv(hw);
1100 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1101 u8 pa_setting;
1102
1103 /* FIXED PA current issue */
1104 pa_setting = efuse_read_1byte(hw, 0x1FA);
1105 if (!(pa_setting & BIT(0))) {
1106 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x0F406);
1107 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x4F406);
1108 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x8F406);
1109 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0xCF406);
1110 }
1111 if (!(pa_setting & BIT(1)) && IS_NORMAL_CHIP(rtlhal->version) &&
1112 IS_92C_SERIAL(rtlhal->version)) {
1113 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x0F406);
1114 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x4F406);
1115 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x8F406);
1116 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0xCF406);
1117 }
1118 if (!(pa_setting & BIT(4))) {
1119 pa_setting = rtl_read_byte(rtlpriv, 0x16);
1120 pa_setting &= 0x0F;
1121 rtl_write_byte(rtlpriv, 0x16, pa_setting | 0x90);
1122 }
1123}
1124
/*
 * _InitAntenna_Selection - on 1T1R boards with antenna diversity enabled,
 * switch the LED/BB config and record which antenna is currently active.
 *
 * NOTE(review): the CONFIG_ANTENNA_DIVERSITY body references pHalData,
 * which is not declared in this function -- it cannot compile if that
 * option is ever enabled; this looks like unported vendor code.  Confirm
 * before enabling the option.
 */
static void _InitAntenna_Selection(struct ieee80211_hw *hw)
{
#ifdef CONFIG_ANTENNA_DIVERSITY
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	if (pHalData->AntDivCfg == 0)
		return;

	if (rtlphy->rf_type == RF_1T1R) {
		rtl_write_dword(rtlpriv, REG_LEDCFG0,
				rtl_read_dword(rtlpriv,
					       REG_LEDCFG0)|BIT(23));
		rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
		if (rtl_get_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300) ==
		    Antenna_A)
			pHalData->CurAntenna = Antenna_A;
		else
			pHalData->CurAntenna = Antenna_B;
	}
#endif
}
1148
/* _dump_registers - empty debug hook; kept as a placeholder. */
static void _dump_registers(struct ieee80211_hw *hw)
{
}
1152
/*
 * _update_mac_setting - snapshot the hardware RX configuration into the
 * software mac state: RCR plus the management/control/data frame filter
 * maps (presumably so later filter updates start from the real hardware
 * values -- confirm with callers).
 */
static void _update_mac_setting(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));

	mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
	mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
	mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
	mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
}
1163
1164int rtl92cu_hw_init(struct ieee80211_hw *hw)
1165{
1166 struct rtl_priv *rtlpriv = rtl_priv(hw);
1167 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1168 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1169 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1170 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1171 int err = 0;
1172 static bool iqk_initialized;
1173
1174 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
1175 err = _rtl92cu_init_mac(hw);
1176 if (err) {
1177 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
1178 return err;
1179 }
1180 err = rtl92c_download_fw(hw);
1181 if (err) {
1182 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1183 ("Failed to download FW. Init HW without FW now..\n"));
1184 err = 1;
1185 rtlhal->fw_ready = false;
1186 return err;
1187 } else {
1188 rtlhal->fw_ready = true;
1189 }
1190 rtlhal->last_hmeboxnum = 0; /* h2c */
1191 _rtl92cu_phy_param_tab_init(hw);
1192 rtl92cu_phy_mac_config(hw);
1193 rtl92cu_phy_bb_config(hw);
1194 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
1195 rtl92c_phy_rf_config(hw);
1196 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
1197 !IS_92C_SERIAL(rtlhal->version)) {
1198 rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
1199 rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
1200 }
1201 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
1202 RF_CHNLBW, RFREG_OFFSET_MASK);
1203 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
1204 RF_CHNLBW, RFREG_OFFSET_MASK);
1205 rtl92cu_bb_block_on(hw);
1206 rtl_cam_reset_all_entry(hw);
1207 rtl92cu_enable_hw_security_config(hw);
1208 ppsc->rfpwr_state = ERFON;
1209 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
1210 if (ppsc->rfpwr_state == ERFON) {
1211 rtl92c_phy_set_rfpath_switch(hw, 1);
1212 if (iqk_initialized) {
1213 rtl92c_phy_iq_calibrate(hw, false);
1214 } else {
1215 rtl92c_phy_iq_calibrate(hw, false);
1216 iqk_initialized = true;
1217 }
1218 rtl92c_dm_check_txpower_tracking(hw);
1219 rtl92c_phy_lc_calibrate(hw);
1220 }
1221 _rtl92cu_hw_configure(hw);
1222 _InitPABias(hw);
1223 _InitAntenna_Selection(hw);
1224 _update_mac_setting(hw);
1225 rtl92c_dm_init(hw);
1226 _dump_registers(hw);
1227 return err;
1228}
1229
1230static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw)
1231{
1232 struct rtl_priv *rtlpriv = rtl_priv(hw);
1233/**************************************
1234a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue
1235b. RF path 0 offset 0x00 = 0x00 disable RF
1236c. APSD_CTRL 0x600[7:0] = 0x40
1237d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
1238e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
1239***************************************/
1240 u8 eRFPath = 0, value8 = 0;
1241 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
1242 rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0);
1243
1244 value8 |= APSDOFF;
1245 rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/
1246 value8 = 0;
1247 value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
1248 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/
1249 value8 &= (~FEN_BB_GLB_RSTn);
1250 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/
1251}
1252
1253static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
1254{
1255 struct rtl_priv *rtlpriv = rtl_priv(hw);
1256 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1257
1258 if (rtlhal->fw_version <= 0x20) {
1259 /*****************************
1260 f. MCUFWDL 0x80[7:0]=0 reset MCU ready status
1261 g. SYS_FUNC_EN 0x02[10]= 0 reset MCU reg, (8051 reset)
1262 h. SYS_FUNC_EN 0x02[15-12]= 5 reset MAC reg, DCORE
1263 i. SYS_FUNC_EN 0x02[10]= 1 enable MCU reg, (8051 enable)
1264 ******************************/
1265 u16 valu16 = 0;
1266
1267 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
1268 valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
1269 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 &
1270 (~FEN_CPUEN))); /* reset MCU ,8051 */
1271 valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN)&0x0FFF;
1272 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
1273 (FEN_HWPDN|FEN_ELDR))); /* reset MAC */
1274 valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
1275 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
1276 FEN_CPUEN)); /* enable MCU ,8051 */
1277 } else {
1278 u8 retry_cnts = 0;
1279
1280 /* IF fw in RAM code, do reset */
1281 if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(1)) {
1282 /* reset MCU ready status */
1283 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
1284 if (rtlhal->fw_ready) {
1285 /* 8051 reset by self */
1286 rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20);
1287 while ((retry_cnts++ < 100) &&
1288 (FEN_CPUEN & rtl_read_word(rtlpriv,
1289 REG_SYS_FUNC_EN))) {
1290 udelay(50);
1291 }
1292 if (retry_cnts >= 100) {
1293 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1294 ("#####=> 8051 reset failed!.."
1295 ".......................\n"););
1296 /* if 8051 reset fail, reset MAC. */
1297 rtl_write_byte(rtlpriv,
1298 REG_SYS_FUNC_EN + 1,
1299 0x50);
1300 udelay(100);
1301 }
1302 }
1303 }
1304 /* Reset MAC and Enable 8051 */
1305 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
1306 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
1307 }
1308 if (bWithoutHWSM) {
1309 /*****************************
1310 Without HW auto state machine
1311 g.SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock
1312 h.AFE_PLL_CTRL 0x28[7:0] = 0x80 disable AFE PLL
1313 i.AFE_XTAL_CTRL 0x24[15:0] = 0x880F gated AFE DIG_CLOCK
1314 j.SYS_ISu_CTRL 0x00[7:0] = 0xF9 isolated digital to PON
1315 ******************************/
1316 rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
1317 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
1318 rtl_write_word(rtlpriv, REG_AFE_XTAL_CTRL, 0x880F);
1319 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xF9);
1320 }
1321}
1322
/* Second-stage digital reset: gate the ELDR (efuse loader) clock and
 * isolate it from the power-on domain.  Called only on the
 * without-HW-state-machine card-disable path. */
static void _ResetDigitalProcedure2(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
/*****************************
k. SYS_FUNC_EN 0x03[7:0] = 0x44		disable ELDR function
l. SYS_CLKR 0x08[15:0] = 0x3083		disable ELDR clock
m. SYS_ISO_CTRL 0x01[7:0] = 0x83	isolated ELDR to PON
******************************/
	/* NOTE(review): values written (0x70A3, 0x82) do not match the
	 * 0x3083/0x83 quoted in the sequence above, and step k's
	 * SYS_FUNC_EN write is absent — confirm against the vendor
	 * power-down sequence before touching this. */
	rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
	rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82);
}
1334
/* Park GPIO pins and LEDs in a quiescent state for card disable:
 * GPIO[7:0] outputs mirror the current input level, GPIO[10:8] are
 * switched to output, and both LEDs are turned off. */
static void _DisableGPIO(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
/***************************************
j. GPIO_PIN_CTRL 0x44[31:0]=0x000
k. Value = GPIO_PIN_CTRL[7:0]
l. GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write ext PIN level
m. GPIO_MUXCFG 0x42 [15:0] = 0x0780
n. LEDCFG 0x4C[15:0] = 0x8080
***************************************/
	u8 value8;
	u16 value16;
	u32 value32;

	/* 1. Disable GPIO[7:0] */
	rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, 0x0000);
	value32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL) & 0xFFFF00FF;
	/* latch current pin levels and drive them back as outputs */
	value8 = (u8) (value32&0x000000FF);
	value32 |= ((value8<<8) | 0x00FF0000);
	rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, value32);
	/* 2. Disable GPIO[10:8] */
	rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+3, 0x00);
	value16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG+2) & 0xFF0F;
	value8 = (u8) (value16&0x000F);
	value16 |= ((value8<<4) | 0x0780);
	/* NOTE(review): value is read from GPIO_MUXCFG+2 but written to
	 * GPIO_PIN_CTRL+2, while step m above names GPIO_MUXCFG — looks
	 * like a possible copy-paste slip; confirm against the vendor
	 * sequence before changing. */
	rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, value16);
	/* 3. Disable LED0 & 1 */
	rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
}
1364
1365static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM)
1366{
1367 struct rtl_priv *rtlpriv = rtl_priv(hw);
1368 u16 value16 = 0;
1369 u8 value8 = 0;
1370
1371 if (bWithoutHWSM) {
1372 /*****************************
1373 n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power
1374 o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
1375 r. When driver call disable, the ASIC will turn off remaining
1376 clock automatically
1377 ******************************/
1378 rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
1379 value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
1380 value8 &= (~LDV12_EN);
1381 rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
1382 }
1383
1384/*****************************
1385h. SPS0_CTRL 0x11[7:0] = 0x23 enter PFM mode
1386i. APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend
1387******************************/
1388 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
1389 value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
1390 rtl_write_word(rtlpriv, REG_APS_FSMCO, (u16)value16);
1391 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
1392}
1393
/* Card-disable sequence for devices where the HW auto state machine
 * completes the power-down: RF/AFE/BB off, first digital reset, GPIO
 * parking, then analog shutdown — order is mandated by the sequence
 * comments in the helpers above. */
static void _CardDisableHWSM(struct ieee80211_hw *hw)
{
	/* ==== RF Off Sequence ==== */
	_DisableRFAFEAndResetBB(hw);
	/* ==== Reset digital sequence ====== */
	_ResetDigitalProcedure1(hw, false);
	/* ==== Pull GPIO PIN to balance level and LED control ====== */
	_DisableGPIO(hw);
	/* ==== Disable analog sequence === */
	_DisableAnalog(hw, false);
}
1405
/* Card-disable sequence for devices without the HW auto state machine:
 * like _CardDisableHWSM but the helpers take the "without HWSM" flag
 * and the second digital reset (_ResetDigitalProcedure2) is run to
 * gate the clocks the state machine would otherwise gate itself. */
static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw)
{
	/*==== RF Off Sequence ==== */
	_DisableRFAFEAndResetBB(hw);
	/* ==== Reset digital sequence ====== */
	_ResetDigitalProcedure1(hw, true);
	/* ==== Pull GPIO PIN to balance level and LED control ====== */
	_DisableGPIO(hw);
	/* ==== Reset digital sequence ====== */
	_ResetDigitalProcedure2(hw);
	/* ==== Disable analog sequence === */
	_DisableAnalog(hw, true);
}
1419
1420static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
1421 u8 set_bits, u8 clear_bits)
1422{
1423 struct rtl_priv *rtlpriv = rtl_priv(hw);
1424 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
1425
1426 rtlusb->reg_bcn_ctrl_val |= set_bits;
1427 rtlusb->reg_bcn_ctrl_val &= ~clear_bits;
1428 rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlusb->reg_bcn_ctrl_val);
1429}
1430
1431static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw)
1432{
1433 struct rtl_priv *rtlpriv = rtl_priv(hw);
1434 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1435 u8 tmp1byte = 0;
1436 if (IS_NORMAL_CHIP(rtlhal->version)) {
1437 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
1438 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
1439 tmp1byte & (~BIT(6)));
1440 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
1441 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
1442 tmp1byte &= ~(BIT(0));
1443 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
1444 } else {
1445 rtl_write_byte(rtlpriv, REG_TXPAUSE,
1446 rtl_read_byte(rtlpriv, REG_TXPAUSE) | BIT(6));
1447 }
1448}
1449
1450static void _rtl92cu_resume_tx_beacon(struct ieee80211_hw *hw)
1451{
1452 struct rtl_priv *rtlpriv = rtl_priv(hw);
1453 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1454 u8 tmp1byte = 0;
1455
1456 if (IS_NORMAL_CHIP(rtlhal->version)) {
1457 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
1458 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
1459 tmp1byte | BIT(6));
1460 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
1461 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
1462 tmp1byte |= BIT(0);
1463 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
1464 } else {
1465 rtl_write_byte(rtlpriv, REG_TXPAUSE,
1466 rtl_read_byte(rtlpriv, REG_TXPAUSE) & (~BIT(6)));
1467 }
1468}
1469
1470static void _rtl92cu_enable_bcn_sub_func(struct ieee80211_hw *hw)
1471{
1472 struct rtl_priv *rtlpriv = rtl_priv(hw);
1473 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1474
1475 if (IS_NORMAL_CHIP(rtlhal->version))
1476 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(1));
1477 else
1478 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1479}
1480
1481static void _rtl92cu_disable_bcn_sub_func(struct ieee80211_hw *hw)
1482{
1483 struct rtl_priv *rtlpriv = rtl_priv(hw);
1484 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1485
1486 if (IS_NORMAL_CHIP(rtlhal->version))
1487 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(1), 0);
1488 else
1489 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1490}
1491
/*
 * _rtl92cu_set_media_status - program MSR and beacon behaviour for an
 * interface type.
 *
 * Stops/resumes beacon TX and toggles beacon sub-functions according
 * to @type, then writes the matching MSR network-type bits and drives
 * the LED.  Returns 0 on success, 1 for an unsupported type.
 */
static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
				     enum nl80211_iftype type)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
	enum led_ctl_mode ledaction = LED_CTL_NO_LINK;

	bt_msr &= 0xfc;	/* clear the two network-type bits */
	rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
	if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
	    NL80211_IFTYPE_STATION) {
		_rtl92cu_stop_tx_beacon(hw);
		_rtl92cu_enable_bcn_sub_func(hw);
	} else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
		_rtl92cu_resume_tx_beacon(hw);
		_rtl92cu_disable_bcn_sub_func(hw);
	} else {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set HW_VAR_MEDIA_"
			 "STATUS:No such media status(%x).\n", type));
	}
	switch (type) {
	case NL80211_IFTYPE_UNSPECIFIED:
		bt_msr |= MSR_NOLINK;
		/* NOTE(review): LED_CTL_LINK for "no link" looks odd —
		 * LED_CTL_NO_LINK (the default above) seems intended;
		 * confirm before changing. */
		ledaction = LED_CTL_LINK;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Set Network type to NO LINK!\n"));
		break;
	case NL80211_IFTYPE_ADHOC:
		bt_msr |= MSR_ADHOC;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Set Network type to Ad Hoc!\n"));
		break;
	case NL80211_IFTYPE_STATION:
		bt_msr |= MSR_INFRA;
		ledaction = LED_CTL_LINK;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Set Network type to STA!\n"));
		break;
	case NL80211_IFTYPE_AP:
		bt_msr |= MSR_AP;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Set Network type to AP!\n"));
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Network type %d not support!\n", type));
		goto error_out;
	}
	rtl_write_byte(rtlpriv, (MSR), bt_msr);
	rtlpriv->cfg->ops->led_control(hw, ledaction);
	/* AP mode clears the beacon TCFG offset; others use 0x66 */
	if ((bt_msr & 0xfc) == MSR_AP)
		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
	else
		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
	return 0;
error_out:
	return 1;
}
1550
1551void rtl92cu_card_disable(struct ieee80211_hw *hw)
1552{
1553 struct rtl_priv *rtlpriv = rtl_priv(hw);
1554 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1555 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
1556 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1557 enum nl80211_iftype opmode;
1558
1559 mac->link_state = MAC80211_NOLINK;
1560 opmode = NL80211_IFTYPE_UNSPECIFIED;
1561 _rtl92cu_set_media_status(hw, opmode);
1562 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1563 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1564 if (rtlusb->disableHWSM)
1565 _CardDisableHWSM(hw);
1566 else
1567 _CardDisableWithoutHWSM(hw);
1568}
1569
/* Intentionally empty: BSSID filtering for this device is handled in
 * _rtl92cu_set_check_bssid() via set_network_type instead. */
void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
	/* dummy routine needed for callback from rtl_op_configure_filter() */
}
1574
1575/*========================================================================== */
1576
1577static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
1578 enum nl80211_iftype type)
1579{
1580 struct rtl_priv *rtlpriv = rtl_priv(hw);
1581 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1582 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1583 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1584 u8 filterout_non_associated_bssid = false;
1585
1586 switch (type) {
1587 case NL80211_IFTYPE_ADHOC:
1588 case NL80211_IFTYPE_STATION:
1589 filterout_non_associated_bssid = true;
1590 break;
1591 case NL80211_IFTYPE_UNSPECIFIED:
1592 case NL80211_IFTYPE_AP:
1593 default:
1594 break;
1595 }
1596 if (filterout_non_associated_bssid == true) {
1597 if (IS_NORMAL_CHIP(rtlhal->version)) {
1598 switch (rtlphy->current_io_type) {
1599 case IO_CMD_RESUME_DM_BY_SCAN:
1600 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1601 rtlpriv->cfg->ops->set_hw_reg(hw,
1602 HW_VAR_RCR, (u8 *)(&reg_rcr));
1603 /* enable update TSF */
1604 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1605 break;
1606 case IO_CMD_PAUSE_DM_BY_SCAN:
1607 reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1608 rtlpriv->cfg->ops->set_hw_reg(hw,
1609 HW_VAR_RCR, (u8 *)(&reg_rcr));
1610 /* disable update TSF */
1611 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1612 break;
1613 }
1614 } else {
1615 reg_rcr |= (RCR_CBSSID);
1616 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1617 (u8 *)(&reg_rcr));
1618 _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
1619 }
1620 } else if (filterout_non_associated_bssid == false) {
1621 if (IS_NORMAL_CHIP(rtlhal->version)) {
1622 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1623 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1624 (u8 *)(&reg_rcr));
1625 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1626 } else {
1627 reg_rcr &= (~RCR_CBSSID);
1628 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1629 (u8 *)(&reg_rcr));
1630 _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
1631 }
1632 }
1633}
1634
1635int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1636{
1637 if (_rtl92cu_set_media_status(hw, type))
1638 return -EOPNOTSUPP;
1639 _rtl92cu_set_check_bssid(hw, type);
1640 return 0;
1641}
1642
/* Program the static beacon timing registers (control, TBTT prohibit,
 * early interrupt, DMA time, and TCFG) for the chip variant. */
static void _InitBeaconParameters(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);

	rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);

	/* TODO: Remove these magic number */
	rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);
	rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
	rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
	/* Change beacon AIFS to the largest number
	 * because the test chip does not contend before sending a beacon. */
	if (IS_NORMAL_CHIP(rtlhal->version))
		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
	else
		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
}
1661
/* Turn the beacon function on: sets BIT(4)|BIT(3)|BIT(1) in BCN_CTRL
 * and writes RD_CTRL+1.
 * NOTE(review): the Enable and Linked parameters are currently unused
 * — the function unconditionally enables; kept for interface
 * compatibility with callers. */
static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
				    bool Linked)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	_rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4) | BIT(3) | BIT(1)), 0x00);
	rtl_write_byte(rtlpriv, REG_RD_CTRL+1, 0x6F);
}
1670
/*
 * rtl92cu_set_beacon_related_registers - full beacon setup for the
 * current opmode: ATIM window, beacon interval, static beacon timing,
 * slot time, a TSF timer reset, and (for IBSS/AP) RX TSF offsets,
 * ending with the beacon function enabled.
 */
void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
{

	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u16 bcn_interval, atim_window;
	u32 value32;

	bcn_interval = mac->beacon_interval;
	atim_window = 2;	/*FIX MERGE */
	rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
	_InitBeaconParameters(hw);
	rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
	/*
	 * Force beacon frame transmission even after receiving beacon frame
	 * from other ad hoc STA
	 *
	 *
	 * Reset TSF Timer to zero, added by Roger. 2008.06.24
	 */
	/* Toggle TSFRST low then high — the rising edge performs the
	 * reset, so the two writes must stay in this order. */
	value32 = rtl_read_dword(rtlpriv, REG_TCR);
	value32 &= ~TSFRST;
	rtl_write_dword(rtlpriv, REG_TCR, value32);
	value32 |= TSFRST;
	rtl_write_dword(rtlpriv, REG_TCR, value32);
	RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
		 ("SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
		 value32));
	/* TODO: Modify later (Find the right parameters)
	 * NOTE: Fix test chip's bug (about contention windows's randomness) */
	if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
	    (mac->opmode == NL80211_IFTYPE_AP)) {
		rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
		rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
	}
	_beacon_function_enable(hw, true, true);
}
1709
1710void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
1711{
1712 struct rtl_priv *rtlpriv = rtl_priv(hw);
1713 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1714 u16 bcn_interval = mac->beacon_interval;
1715
1716 RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
1717 ("beacon_interval:%d\n", bcn_interval));
1718 rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
1719}
1720
/* Intentionally empty: the USB variant has no interrupt mask to
 * update; stub kept to satisfy the shared rtl_hal_ops interface. */
void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
				   u32 add_msr, u32 rm_msr)
{
}
1725
/*
 * rtl92cu_get_hw_reg - read one logical HW variable into *val.
 *
 * The caller passes an untyped buffer; each case casts it to the type
 * that variable uses (u16/u32/u64/bool/enum), so the caller must size
 * it correctly.  Unknown variables only log an error.
 */
void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));

	switch (variable) {
	case HW_VAR_RCR:
		/* cached copy, not a register read */
		*((u32 *)(val)) = mac->rx_conf;
		break;
	case HW_VAR_RF_STATE:
		*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
		break;
	case HW_VAR_FWLPS_RF_ON:{
			enum rf_pwrstate rfState;
			u32 val_rcr;

			rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
						      (u8 *)(&rfState));
			if (rfState == ERFOFF) {
				*((bool *) (val)) = true;
			} else {
				/* true only when RCR bits 16-18 are clear */
				val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
				val_rcr &= 0x00070000;
				if (val_rcr)
					*((bool *) (val)) = false;
				else
					*((bool *) (val)) = true;
			}
			break;
		}
	case HW_VAR_FW_PSMODE_STATUS:
		*((bool *) (val)) = ppsc->fw_current_inpsmode;
		break;
	case HW_VAR_CORRECT_TSF:{
			u64 tsf;
			u32 *ptsf_low = (u32 *)&tsf;
			u32 *ptsf_high = ((u32 *)&tsf) + 1;

			/* NOTE(review): high dword is read before low
			 * without latching; a rollover between the two
			 * reads could tear the value — confirm whether
			 * the HW latches TSFTR on first access. */
			*ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
			*ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
			*((u64 *)(val)) = tsf;
			break;
		}
	case HW_VAR_MGT_FILTER:
		*((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
		break;
	case HW_VAR_CTRL_FILTER:
		*((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
		break;
	case HW_VAR_DATA_FILTER:
		*((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("switch case not process\n"));
		break;
	}
}
1785
1786void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1787{
1788 struct rtl_priv *rtlpriv = rtl_priv(hw);
1789 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1790 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1791 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1792 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1793 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
1794 enum wireless_mode wirelessmode = mac->mode;
1795 u8 idx = 0;
1796
1797 switch (variable) {
1798 case HW_VAR_ETHER_ADDR:{
1799 for (idx = 0; idx < ETH_ALEN; idx++) {
1800 rtl_write_byte(rtlpriv, (REG_MACID + idx),
1801 val[idx]);
1802 }
1803 break;
1804 }
1805 case HW_VAR_BASIC_RATE:{
1806 u16 rate_cfg = ((u16 *) val)[0];
1807 u8 rate_index = 0;
1808
1809 rate_cfg &= 0x15f;
1810 /* TODO */
1811 /* if (mac->current_network.vender == HT_IOT_PEER_CISCO
1812 * && ((rate_cfg & 0x150) == 0)) {
1813 * rate_cfg |= 0x010;
1814 * } */
1815 rate_cfg |= 0x01;
1816 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
1817 rtl_write_byte(rtlpriv, REG_RRSR + 1,
1818 (rate_cfg >> 8) & 0xff);
1819 while (rate_cfg > 0x1) {
1820 rate_cfg >>= 1;
1821 rate_index++;
1822 }
1823 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
1824 rate_index);
1825 break;
1826 }
1827 case HW_VAR_BSSID:{
1828 for (idx = 0; idx < ETH_ALEN; idx++) {
1829 rtl_write_byte(rtlpriv, (REG_BSSID + idx),
1830 val[idx]);
1831 }
1832 break;
1833 }
1834 case HW_VAR_SIFS:{
1835 rtl_write_byte(rtlpriv, REG_SIFS_CCK + 1, val[0]);
1836 rtl_write_byte(rtlpriv, REG_SIFS_OFDM + 1, val[1]);
1837 rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
1838 rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
1839 rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
1840 rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
1841 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1842 ("HW_VAR_SIFS\n"));
1843 break;
1844 }
1845 case HW_VAR_SLOT_TIME:{
1846 u8 e_aci;
1847 u8 QOS_MODE = 1;
1848
1849 rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
1850 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1851 ("HW_VAR_SLOT_TIME %x\n", val[0]));
1852 if (QOS_MODE) {
1853 for (e_aci = 0; e_aci < AC_MAX; e_aci++)
1854 rtlpriv->cfg->ops->set_hw_reg(hw,
1855 HW_VAR_AC_PARAM,
1856 (u8 *)(&e_aci));
1857 } else {
1858 u8 sifstime = 0;
1859 u8 u1bAIFS;
1860
1861 if (IS_WIRELESS_MODE_A(wirelessmode) ||
1862 IS_WIRELESS_MODE_N_24G(wirelessmode) ||
1863 IS_WIRELESS_MODE_N_5G(wirelessmode))
1864 sifstime = 16;
1865 else
1866 sifstime = 10;
1867 u1bAIFS = sifstime + (2 * val[0]);
1868 rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
1869 u1bAIFS);
1870 rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
1871 u1bAIFS);
1872 rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
1873 u1bAIFS);
1874 rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
1875 u1bAIFS);
1876 }
1877 break;
1878 }
1879 case HW_VAR_ACK_PREAMBLE:{
1880 u8 reg_tmp;
1881 u8 short_preamble = (bool) (*(u8 *) val);
1882 reg_tmp = 0;
1883 if (short_preamble)
1884 reg_tmp |= 0x80;
1885 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
1886 break;
1887 }
1888 case HW_VAR_AMPDU_MIN_SPACE:{
1889 u8 min_spacing_to_set;
1890 u8 sec_min_space;
1891
1892 min_spacing_to_set = *((u8 *) val);
1893 if (min_spacing_to_set <= 7) {
1894 switch (rtlpriv->sec.pairwise_enc_algorithm) {
1895 case NO_ENCRYPTION:
1896 case AESCCMP_ENCRYPTION:
1897 sec_min_space = 0;
1898 break;
1899 case WEP40_ENCRYPTION:
1900 case WEP104_ENCRYPTION:
1901 case TKIP_ENCRYPTION:
1902 sec_min_space = 6;
1903 break;
1904 default:
1905 sec_min_space = 7;
1906 break;
1907 }
1908 if (min_spacing_to_set < sec_min_space)
1909 min_spacing_to_set = sec_min_space;
1910 mac->min_space_cfg = ((mac->min_space_cfg &
1911 0xf8) |
1912 min_spacing_to_set);
1913 *val = min_spacing_to_set;
1914 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1915 ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
1916 mac->min_space_cfg));
1917 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
1918 mac->min_space_cfg);
1919 }
1920 break;
1921 }
1922 case HW_VAR_SHORTGI_DENSITY:{
1923 u8 density_to_set;
1924
1925 density_to_set = *((u8 *) val);
1926 density_to_set &= 0x1f;
1927 mac->min_space_cfg &= 0x07;
1928 mac->min_space_cfg |= (density_to_set << 3);
1929 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1930 ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
1931 mac->min_space_cfg));
1932 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
1933 mac->min_space_cfg);
1934 break;
1935 }
1936 case HW_VAR_AMPDU_FACTOR:{
1937 u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
1938 u8 factor_toset;
1939 u8 *p_regtoset = NULL;
1940 u8 index = 0;
1941
1942 p_regtoset = regtoset_normal;
1943 factor_toset = *((u8 *) val);
1944 if (factor_toset <= 3) {
1945 factor_toset = (1 << (factor_toset + 2));
1946 if (factor_toset > 0xf)
1947 factor_toset = 0xf;
1948 for (index = 0; index < 4; index++) {
1949 if ((p_regtoset[index] & 0xf0) >
1950 (factor_toset << 4))
1951 p_regtoset[index] =
1952 (p_regtoset[index] & 0x0f)
1953 | (factor_toset << 4);
1954 if ((p_regtoset[index] & 0x0f) >
1955 factor_toset)
1956 p_regtoset[index] =
1957 (p_regtoset[index] & 0xf0)
1958 | (factor_toset);
1959 rtl_write_byte(rtlpriv,
1960 (REG_AGGLEN_LMT + index),
1961 p_regtoset[index]);
1962 }
1963 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1964 ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
1965 factor_toset));
1966 }
1967 break;
1968 }
1969 case HW_VAR_AC_PARAM:{
1970 u8 e_aci = *((u8 *) val);
1971 u32 u4b_ac_param;
1972 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
1973 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
1974 u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
1975
1976 u4b_ac_param = (u32) mac->ac[e_aci].aifs;
1977 u4b_ac_param |= (u32) ((cw_min & 0xF) <<
1978 AC_PARAM_ECW_MIN_OFFSET);
1979 u4b_ac_param |= (u32) ((cw_max & 0xF) <<
1980 AC_PARAM_ECW_MAX_OFFSET);
1981 u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
1982 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1983 ("queue:%x, ac_param:%x\n", e_aci,
1984 u4b_ac_param));
1985 switch (e_aci) {
1986 case AC1_BK:
1987 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
1988 u4b_ac_param);
1989 break;
1990 case AC0_BE:
1991 rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
1992 u4b_ac_param);
1993 break;
1994 case AC2_VI:
1995 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
1996 u4b_ac_param);
1997 break;
1998 case AC3_VO:
1999 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
2000 u4b_ac_param);
2001 break;
2002 default:
2003 RT_ASSERT(false, ("SetHwReg8185(): invalid"
2004 " aci: %d !\n", e_aci));
2005 break;
2006 }
2007 if (rtlusb->acm_method != eAcmWay2_SW)
2008 rtlpriv->cfg->ops->set_hw_reg(hw,
2009 HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
2010 break;
2011 }
2012 case HW_VAR_ACM_CTRL:{
2013 u8 e_aci = *((u8 *) val);
2014 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
2015 (&(mac->ac[0].aifs));
2016 u8 acm = p_aci_aifsn->f.acm;
2017 u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
2018
2019 acm_ctrl =
2020 acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
2021 if (acm) {
2022 switch (e_aci) {
2023 case AC0_BE:
2024 acm_ctrl |= AcmHw_BeqEn;
2025 break;
2026 case AC2_VI:
2027 acm_ctrl |= AcmHw_ViqEn;
2028 break;
2029 case AC3_VO:
2030 acm_ctrl |= AcmHw_VoqEn;
2031 break;
2032 default:
2033 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2034 ("HW_VAR_ACM_CTRL acm set "
2035 "failed: eACI is %d\n", acm));
2036 break;
2037 }
2038 } else {
2039 switch (e_aci) {
2040 case AC0_BE:
2041 acm_ctrl &= (~AcmHw_BeqEn);
2042 break;
2043 case AC2_VI:
2044 acm_ctrl &= (~AcmHw_ViqEn);
2045 break;
2046 case AC3_VO:
2047 acm_ctrl &= (~AcmHw_BeqEn);
2048 break;
2049 default:
2050 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2051 ("switch case not process\n"));
2052 break;
2053 }
2054 }
2055 RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
2056 ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
2057 "Write 0x%X\n", acm_ctrl));
2058 rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
2059 break;
2060 }
2061 case HW_VAR_RCR:{
2062 rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
2063 mac->rx_conf = ((u32 *) (val))[0];
2064 RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
2065 ("### Set RCR(0x%08x) ###\n", mac->rx_conf));
2066 break;
2067 }
2068 case HW_VAR_RETRY_LIMIT:{
2069 u8 retry_limit = ((u8 *) (val))[0];
2070
2071 rtl_write_word(rtlpriv, REG_RL,
2072 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
2073 retry_limit << RETRY_LIMIT_LONG_SHIFT);
2074 RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG, ("Set HW_VAR_R"
2075 "ETRY_LIMIT(0x%08x)\n", retry_limit));
2076 break;
2077 }
2078 case HW_VAR_DUAL_TSF_RST:
2079 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
2080 break;
2081 case HW_VAR_EFUSE_BYTES:
2082 rtlefuse->efuse_usedbytes = *((u16 *) val);
2083 break;
2084 case HW_VAR_EFUSE_USAGE:
2085 rtlefuse->efuse_usedpercentage = *((u8 *) val);
2086 break;
2087 case HW_VAR_IO_CMD:
2088 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
2089 break;
2090 case HW_VAR_WPA_CONFIG:
2091 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
2092 break;
2093 case HW_VAR_SET_RPWM:{
2094 u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
2095
2096 if (rpwm_val & BIT(7))
2097 rtl_write_byte(rtlpriv, REG_USB_HRPWM,
2098 (*(u8 *)val));
2099 else
2100 rtl_write_byte(rtlpriv, REG_USB_HRPWM,
2101 ((*(u8 *)val) | BIT(7)));
2102 break;
2103 }
2104 case HW_VAR_H2C_FW_PWRMODE:{
2105 u8 psmode = (*(u8 *) val);
2106
2107 if ((psmode != FW_PS_ACTIVE_MODE) &&
2108 (!IS_92C_SERIAL(rtlhal->version)))
2109 rtl92c_dm_rf_saving(hw, true);
2110 rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
2111 break;
2112 }
2113 case HW_VAR_FW_PSMODE_STATUS:
2114 ppsc->fw_current_inpsmode = *((bool *) val);
2115 break;
2116 case HW_VAR_H2C_FW_JOINBSSRPT:{
2117 u8 mstatus = (*(u8 *) val);
2118 u8 tmp_reg422;
2119 bool recover = false;
2120
2121 if (mstatus == RT_MEDIA_CONNECT) {
2122 rtlpriv->cfg->ops->set_hw_reg(hw,
2123 HW_VAR_AID, NULL);
2124 rtl_write_byte(rtlpriv, REG_CR + 1, 0x03);
2125 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
2126 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
2127 tmp_reg422 = rtl_read_byte(rtlpriv,
2128 REG_FWHW_TXQ_CTRL + 2);
2129 if (tmp_reg422 & BIT(6))
2130 recover = true;
2131 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
2132 tmp_reg422 & (~BIT(6)));
2133 rtl92c_set_fw_rsvdpagepkt(hw, 0);
2134 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
2135 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
2136 if (recover)
2137 rtl_write_byte(rtlpriv,
2138 REG_FWHW_TXQ_CTRL + 2,
2139 tmp_reg422 | BIT(6));
2140 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
2141 }
2142 rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
2143 break;
2144 }
2145 case HW_VAR_AID:{
2146 u16 u2btmp;
2147
2148 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
2149 u2btmp &= 0xC000;
2150 rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
2151 (u2btmp | mac->assoc_id));
2152 break;
2153 }
2154 case HW_VAR_CORRECT_TSF:{
2155 u8 btype_ibss = ((u8 *) (val))[0];
2156
2157 if (btype_ibss == true)
2158 _rtl92cu_stop_tx_beacon(hw);
2159 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
2160 rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(mac->tsf &
2161 0xffffffff));
2162 rtl_write_dword(rtlpriv, REG_TSFTR + 4,
2163 (u32)((mac->tsf >> 32) & 0xffffffff));
2164 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
2165 if (btype_ibss == true)
2166 _rtl92cu_resume_tx_beacon(hw);
2167 break;
2168 }
2169 case HW_VAR_MGT_FILTER:
2170 rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
2171 break;
2172 case HW_VAR_CTRL_FILTER:
2173 rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
2174 break;
2175 case HW_VAR_DATA_FILTER:
2176 rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
2177 break;
2178 default:
2179 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
2180 "not process\n"));
2181 break;
2182 }
2183}
2184
/*
 * rtl92cu_update_hal_rate_table - compute the rate bitmap for the
 * current association and write it to REG_ARFR0.
 *
 * Combines the basic-rate bitmap with the MCS map, masks it per
 * wireless mode / RF chain count, and sets bit 28 when short GI
 * applies for the current bandwidth.
 */
void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u32 ratr_value = (u32) mac->basic_rates;
	u8 *mcsrate = mac->mcs;
	u8 ratr_index = 0;
	u8 nmode = mac->ht_enable;
	u8 mimo_ps = 1;
	u16 shortgi_rate = 0;
	u32 tmp_ratr_value = 0;
	u8 curtxbw_40mhz = mac->bw_40;
	u8 curshortgi_40mhz = mac->sgi_40;
	u8 curshortgi_20mhz = mac->sgi_20;
	enum wireless_mode wirelessmode = mac->mode;

	/* MCS rates occupy bits 12+; note the u16 alias of the mcs array */
	ratr_value |= ((*(u16 *) (mcsrate))) << 12;
	switch (wirelessmode) {
	case WIRELESS_MODE_B:
		if (ratr_value & 0x0000000c)
			ratr_value &= 0x0000000d;
		else
			ratr_value &= 0x0000000f;
		break;
	case WIRELESS_MODE_G:
		ratr_value &= 0x00000FF5;
		break;
	case WIRELESS_MODE_N_24G:
	case WIRELESS_MODE_N_5G:
		nmode = 1;
		if (mimo_ps == 0) {
			ratr_value &= 0x0007F005;
		} else {
			u32 ratr_mask;

			/* single-spatial-stream chips drop MCS8-15 */
			if (get_rf_type(rtlphy) == RF_1T2R ||
			    get_rf_type(rtlphy) == RF_1T1R)
				ratr_mask = 0x000ff005;
			else
				ratr_mask = 0x0f0ff005;
			if (curtxbw_40mhz)
				ratr_mask |= 0x00000010;
			ratr_value &= ratr_mask;
		}
		break;
	default:
		if (rtlphy->rf_type == RF_1T2R)
			ratr_value &= 0x000ff0ff;
		else
			ratr_value &= 0x0f0ff0ff;
		break;
	}
	ratr_value &= 0x0FFFFFFF;
	if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
	    (!curtxbw_40mhz && curshortgi_20mhz))) {
		ratr_value |= 0x10000000;	/* short GI flag */
		tmp_ratr_value = (ratr_value >> 12);
		/* find the highest enabled MCS index */
		for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
			if ((1 << shortgi_rate) & tmp_ratr_value)
				break;
		}
		/* NOTE(review): shortgi_rate is computed here but never
		 * written to any register — either dead code or a
		 * missing write; compare with the PCI (rtl8192ce)
		 * counterpart before removing. */
		shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
			       (shortgi_rate << 4) | (shortgi_rate);
	}
	rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
		 REG_ARFR0)));
}
2254
/*
 * rtl92cu_update_hal_rate_mask - build a per-station rate mask and
 * send it to the firmware via the H2C_RA_MASK command.
 *
 * The bitmap is narrowed by wireless mode, RF chain count, bandwidth,
 * and the caller-supplied @rssi_level (1 = strongest restriction);
 * byte 4 carries the macid, short-GI flag (0x20) and a valid bit
 * (0x80).
 */
void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u32 ratr_bitmap = (u32) mac->basic_rates;
	u8 *p_mcsrate = mac->mcs;
	u8 ratr_index = 0;
	u8 curtxbw_40mhz = mac->bw_40;
	u8 curshortgi_40mhz = mac->sgi_40;
	u8 curshortgi_20mhz = mac->sgi_20;
	enum wireless_mode wirelessmode = mac->mode;
	bool shortgi = false;
	u8 rate_mask[5];
	u8 macid = 0;
	u8 mimops = 1;

	ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
	switch (wirelessmode) {
	case WIRELESS_MODE_B:
		ratr_index = RATR_INX_WIRELESS_B;
		if (ratr_bitmap & 0x0000000c)
			ratr_bitmap &= 0x0000000d;
		else
			ratr_bitmap &= 0x0000000f;
		break;
	case WIRELESS_MODE_G:
		ratr_index = RATR_INX_WIRELESS_GB;
		if (rssi_level == 1)
			ratr_bitmap &= 0x00000f00;
		else if (rssi_level == 2)
			ratr_bitmap &= 0x00000ff0;
		else
			ratr_bitmap &= 0x00000ff5;
		break;
	case WIRELESS_MODE_A:
		ratr_index = RATR_INX_WIRELESS_A;
		ratr_bitmap &= 0x00000ff0;
		break;
	case WIRELESS_MODE_N_24G:
	case WIRELESS_MODE_N_5G:
		ratr_index = RATR_INX_WIRELESS_NGB;
		if (mimops == 0) {
			if (rssi_level == 1)
				ratr_bitmap &= 0x00070000;
			else if (rssi_level == 2)
				ratr_bitmap &= 0x0007f000;
			else
				ratr_bitmap &= 0x0007f005;
		} else {
			/* masks differ by stream count and bandwidth */
			if (rtlphy->rf_type == RF_1T2R ||
			    rtlphy->rf_type == RF_1T1R) {
				if (curtxbw_40mhz) {
					if (rssi_level == 1)
						ratr_bitmap &= 0x000f0000;
					else if (rssi_level == 2)
						ratr_bitmap &= 0x000ff000;
					else
						ratr_bitmap &= 0x000ff015;
				} else {
					if (rssi_level == 1)
						ratr_bitmap &= 0x000f0000;
					else if (rssi_level == 2)
						ratr_bitmap &= 0x000ff000;
					else
						ratr_bitmap &= 0x000ff005;
				}
			} else {
				if (curtxbw_40mhz) {
					if (rssi_level == 1)
						ratr_bitmap &= 0x0f0f0000;
					else if (rssi_level == 2)
						ratr_bitmap &= 0x0f0ff000;
					else
						ratr_bitmap &= 0x0f0ff015;
				} else {
					if (rssi_level == 1)
						ratr_bitmap &= 0x0f0f0000;
					else if (rssi_level == 2)
						ratr_bitmap &= 0x0f0ff000;
					else
						ratr_bitmap &= 0x0f0ff005;
				}
			}
		}
		if ((curtxbw_40mhz && curshortgi_40mhz) ||
		    (!curtxbw_40mhz && curshortgi_20mhz)) {
			if (macid == 0)
				shortgi = true;
			else if (macid == 1)
				shortgi = false;
		}
		break;
	default:
		ratr_index = RATR_INX_WIRELESS_NGB;
		if (rtlphy->rf_type == RF_1T2R)
			ratr_bitmap &= 0x000ff0ff;
		else
			ratr_bitmap &= 0x0f0ff0ff;
		break;
	}
	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n",
		 ratr_bitmap));
	/* NOTE(review): this u32 store into a u8[5] assumes little-endian
	 * byte order (and is an aliasing cast) — fine for the FW command
	 * layout on LE hosts, but worth confirming for portability. */
	*(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
				      ratr_index << 28);
	rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
		 "ratr_val:%x, %x:%x:%x:%x:%x\n",
		 ratr_index, ratr_bitmap,
		 rate_mask[0], rate_mask[1],
		 rate_mask[2], rate_mask[3],
		 rate_mask[4]));
	rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
}
2369
2370void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
2371{
2372 struct rtl_priv *rtlpriv = rtl_priv(hw);
2373 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2374 u16 sifs_timer;
2375
2376 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2377 (u8 *)&mac->slot_time);
2378 if (!mac->ht_enable)
2379 sifs_timer = 0x0a0a;
2380 else
2381 sifs_timer = 0x0e0e;
2382 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
2383}
2384
/*
 * rtl92cu_gpio_radio_on_off_checking - poll the HW rfkill switch state
 * @hw: mac80211 hardware context
 * @valid: out parameter, set to 1 when the reading is usable
 *
 * Samples the hardware radio on/off input (HSISR bit 7 in power-down
 * mode, otherwise the GPIO3 input pin), updates ppsc->hwradiooff to
 * match, and performs the power-level bookkeeping for the transition.
 * The rfchange_inprogress flag (under rf_ps_lock) serializes concurrent
 * RF state changes.
 *
 * Returns true when the radio is ON (i.e. !ppsc->hwradiooff).
 */
bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
	u8 u1tmp = 0;
	bool actuallyset = false;
	unsigned long flag = 0;
	/* to do - usb autosuspend */
	u8 usb_autosuspend = 0;

	/* Bail out if a software RF change is already running. */
	if (ppsc->swrf_processing)
		return false;
	/* Claim the RF-change-in-progress slot, or give up. */
	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
	if (ppsc->rfchange_inprogress) {
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
		return false;
	} else {
		ppsc->rfchange_inprogress = true;
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
	}
	cur_rfstate = ppsc->rfpwr_state;
	if (usb_autosuspend) {
		/* to do................... */
	} else {
		if (ppsc->pwrdown_mode) {
			/* Power-down mode: switch state lives in HSISR.7. */
			u1tmp = rtl_read_byte(rtlpriv, REG_HSISR);
			e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
					       ERFOFF : ERFON;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
				 ("pwrdown, 0x5c(BIT7)=%02x\n", u1tmp));
		} else {
			/* Route pin to GPIO input and sample GPIO3. */
			rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
				       rtl_read_byte(rtlpriv,
				       REG_MAC_PINMUX_CFG) & ~(BIT(3)));
			u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
			e_rfpowerstate_toset = (u1tmp & BIT(3)) ?
					       ERFON : ERFOFF;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
				 ("GPIO_IN=%02x\n", u1tmp));
		}
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("N-SS RF =%x\n",
			 e_rfpowerstate_toset));
	}
	/* Act only when the sampled state differs from what we track. */
	if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW "
			 "Radio ON, RF ON\n"));
		ppsc->hwradiooff = false;
		actuallyset = true;
	} else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset ==
		   ERFOFF)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW"
			 " Radio OFF\n"));
		ppsc->hwradiooff = true;
		actuallyset = true;
	} else {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
			 ("pHalData->bHwRadioOff and eRfPowerStateToSet do not"
			 " match: pHalData->bHwRadioOff %x, eRfPowerStateToSet "
			 "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
	}
	if (actuallyset) {
		/* NOTE(review): this unconditionally re-asserts
		 * hwradiooff even on the radio-ON path above, which makes
		 * the function return false after turning the radio on --
		 * looks like a bug; confirm against vendor driver intent. */
		ppsc->hwradiooff = 1;
		if (e_rfpowerstate_toset == ERFON) {
			/* Leaving ASPM / D3 power-save when the RF comes up. */
			if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
			    RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
				RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
			else if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
				 && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3))
				RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
		}
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
		ppsc->rfchange_inprogress = false;
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
		/* For power down module, we need to enable register block
		 * contrl reg at 0x1c. Then enable power down control bit
		 * of register 0x04 BIT4 and BIT15 as 1.
		 */
		if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
			/* Enable register area 0x0-0xc. */
			rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
			if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
				/*
				 * We should configure HW PDn source for WiFi
				 * ONLY, and then our HW will be set in
				 * power-down mode if PDn source from all
				 * functions are configured.
				 */
				u1tmp = rtl_read_byte(rtlpriv,
						      REG_MULTI_FUNC_CTRL);
				rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
					       (u1tmp|WL_HWPDN_EN));
			} else {
				rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
			}
		}
		if (e_rfpowerstate_toset == ERFOFF) {
			/* Radio went off: arm the configured PS level. */
			if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
				RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
			else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
				RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
		}
	} else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
		/* Enter D3 or ASPM after GPIO had been done. */
		if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
			RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
		else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
			RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
		ppsc->rfchange_inprogress = false;
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
	} else {
		/* No change: just release the in-progress flag. */
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
		ppsc->rfchange_inprogress = false;
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
	}
	*valid = 1;
	return !ppsc->hwradiooff;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
new file mode 100644
index 000000000000..62af555bb61c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -0,0 +1,116 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
#ifndef __RTL92CU_HW_H__
#define __RTL92CU_HW_H__

/* Firmware H2C command id used with rtl92c_fill_h2c_cmd(). */
#define H2C_RA_MASK	6

/* LLT (link list table) polling limits, see rtl92c_llt_write(). */
#define LLT_POLLING_LLT_THRESHOLD		20
#define LLT_POLLING_READY_TIMEOUT_COUNT		100
#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER		255

#define RX_PAGE_SIZE_REG_VALUE			PBP_128
/* Note: We will divide number of page equally for each queue
 * other than public queue! */
#define TX_TOTAL_PAGE_NUMBER			0xF8
#define TX_PAGE_BOUNDARY			(TX_TOTAL_PAGE_NUMBER + 1)


#define CHIP_B_PAGE_NUM_PUBQ			0xE7

/* For Test Chip Setting
 * (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
#define CHIP_A_PAGE_NUM_PUBQ			0x7E


/* For Chip A Setting */
#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER		0xF5
#define WMM_CHIP_A_TX_PAGE_BOUNDARY		\
	(WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER + 1)	/* F6 */

#define WMM_CHIP_A_PAGE_NUM_PUBQ		0xA3
#define WMM_CHIP_A_PAGE_NUM_HPQ			0x29
#define WMM_CHIP_A_PAGE_NUM_LPQ			0x29



/* Note: For Chip B Setting ,modify later */
#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER		0xF5
#define WMM_CHIP_B_TX_PAGE_BOUNDARY		\
	(WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER + 1)	/* F6 */

#define WMM_CHIP_B_PAGE_NUM_PUBQ		0xB0
#define WMM_CHIP_B_PAGE_NUM_HPQ			0x29
#define WMM_CHIP_B_PAGE_NUM_LPQ			0x1C
#define WMM_CHIP_B_PAGE_NUM_NPQ			0x1C

/* Masks for the board-type field read from efuse/EEPROM. */
#define BOARD_TYPE_NORMAL_MASK			0xE0
#define BOARD_TYPE_TEST_MASK			0x0F

/* should be renamed and moved to another file */
enum _BOARD_TYPE_8192CUSB {
	BOARD_USB_DONGLE = 0,		/* USB dongle */
	BOARD_USB_High_PA = 1,		/* USB dongle - high power PA */
	BOARD_MINICARD = 2,		/* Minicard */
	BOARD_USB_SOLO = 3,		/* USB solo-Slim module */
	BOARD_USB_COMBO = 4,		/* USB Combo-Slim module */
};

#define IS_HIGHT_PA(boardtype)	\
	((boardtype == BOARD_USB_High_PA) ? true : false)

#define RTL92C_DRIVER_INFO_SIZE	4
void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw);
int rtl92cu_hw_init(struct ieee80211_hw *hw);
void rtl92cu_card_disable(struct ieee80211_hw *hw);
int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw);
void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw);
void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
				   u32 add_msr, u32 rm_msr);
void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw);
void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);

void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
u8 _rtl92c_get_chnl_group(u8 chnl);
int rtl92c_download_fw(struct ieee80211_hw *hw);
void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
			 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
new file mode 100644
index 000000000000..332c74348a69
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
@@ -0,0 +1,142 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27
28#include "../wifi.h"
29#include "../usb.h"
30#include "reg.h"
31#include "led.h"
32
33static void _rtl92cu_init_led(struct ieee80211_hw *hw,
34 struct rtl_led *pled, enum rtl_led_pin ledpin)
35{
36 pled->hw = hw;
37 pled->ledpin = ledpin;
38 pled->ledon = false;
39}
40
/* Intentionally empty: software LEDs hold no resources to release;
 * kept for symmetry with _rtl92cu_init_led(). */
static void _rtl92cu_deInit_led(struct rtl_led *pled)
{
}
44
45void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
46{
47 u8 ledcfg;
48 struct rtl_priv *rtlpriv = rtl_priv(hw);
49
50 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
51 ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
52 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
53 switch (pled->ledpin) {
54 case LED_PIN_GPIO0:
55 break;
56 case LED_PIN_LED0:
57 rtl_write_byte(rtlpriv,
58 REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
59 break;
60 case LED_PIN_LED1:
61 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
62 break;
63 default:
64 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
65 ("switch case not process\n"));
66 break;
67 }
68 pled->ledon = true;
69}
70
71void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
72{
73 struct rtl_priv *rtlpriv = rtl_priv(hw);
74 struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
75 u8 ledcfg;
76
77 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
78 ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
79 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
80 switch (pled->ledpin) {
81 case LED_PIN_GPIO0:
82 break;
83 case LED_PIN_LED0:
84 ledcfg &= 0xf0;
85 if (usbpriv->ledctl.led_opendrain == true)
86 rtl_write_byte(rtlpriv, REG_LEDCFG2,
87 (ledcfg | BIT(1) | BIT(5) | BIT(6)));
88 else
89 rtl_write_byte(rtlpriv, REG_LEDCFG2,
90 (ledcfg | BIT(3) | BIT(5) | BIT(6)));
91 break;
92 case LED_PIN_LED1:
93 ledcfg &= 0x0f;
94 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
95 break;
96 default:
97 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
98 ("switch case not process\n"));
99 break;
100 }
101 pled->ledon = false;
102}
103
104void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
105{
106 struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
107 _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
108 _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
109}
110
111void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
112{
113 struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
114 _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
115 _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
116}
117
/* Intentionally empty stub: per-action LED blinking is not implemented
 * for this device yet; rtl92cu_led_control() still routes through here. */
static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
				    enum led_ctl_mode ledaction)
{
}
122
123void rtl92cu_led_control(struct ieee80211_hw *hw,
124 enum led_ctl_mode ledaction)
125{
126 struct rtl_priv *rtlpriv = rtl_priv(hw);
127 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
128
129 if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
130 (ledaction == LED_CTL_TX ||
131 ledaction == LED_CTL_RX ||
132 ledaction == LED_CTL_SITE_SURVEY ||
133 ledaction == LED_CTL_LINK ||
134 ledaction == LED_CTL_NO_LINK ||
135 ledaction == LED_CTL_START_TO_LINK ||
136 ledaction == LED_CTL_POWER_ON)) {
137 return;
138 }
139 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
140 ledaction));
141 _rtl92cu_sw_led_control(hw, ledaction);
142}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
new file mode 100644
index 000000000000..decaee4d1eb1
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
@@ -0,0 +1,37 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27
#ifndef __RTL92CU_LED_H__
#define __RTL92CU_LED_H__

/* Software LED control for the rtl8192cu USB driver (see led.c). */
void rtl92cu_init_sw_leds(struct ieee80211_hw *hw);
void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw);
void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
void rtl92cu_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
new file mode 100644
index 000000000000..f8514cba17b6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -0,0 +1,1144 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28****************************************************************************/
29#include <linux/module.h>
30
31#include "../wifi.h"
32#include "../pci.h"
33#include "../usb.h"
34#include "../ps.h"
35#include "../cam.h"
36#include "reg.h"
37#include "def.h"
38#include "phy.h"
39#include "rf.h"
40#include "dm.h"
41#include "mac.h"
42#include "trx.h"
43
44/* macro to shorten lines */
45
46#define LINK_Q ui_link_quality
47#define RX_EVM rx_evm_percentage
48#define RX_SIGQ rx_mimo_signalquality
49
50
51void rtl92c_read_chip_version(struct ieee80211_hw *hw)
52{
53 struct rtl_priv *rtlpriv = rtl_priv(hw);
54 struct rtl_phy *rtlphy = &(rtlpriv->phy);
55 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
56 enum version_8192c chip_version = VERSION_UNKNOWN;
57 u32 value32;
58
59 value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
60 if (value32 & TRP_VAUX_EN) {
61 chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C :
62 VERSION_TEST_CHIP_88C;
63 } else {
64 /* Normal mass production chip. */
65 chip_version = NORMAL_CHIP;
66 chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
67 chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
68 /* RTL8723 with BT function. */
69 chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
70 if (IS_VENDOR_UMC(chip_version))
71 chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
72 CHIP_VENDOR_UMC_B_CUT : 0);
73 if (IS_92C_SERIAL(chip_version)) {
74 value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
75 chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
76 CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
77 } else if (IS_8723_SERIES(chip_version)) {
78 value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
79 chip_version |= ((value32 & RF_RL_ID) ?
80 CHIP_8723_DRV_REV : 0);
81 }
82 }
83 rtlhal->version = (enum version_8192c)chip_version;
84 switch (rtlhal->version) {
85 case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
86 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
87 ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
88 break;
89 case VERSION_NORMAL_TSMC_CHIP_92C:
90 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
91 ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n"));
92 break;
93 case VERSION_NORMAL_TSMC_CHIP_88C:
94 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
95 ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n"));
96 break;
97 case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
98 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
99 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_i"
100 "92C_1T2R_A_CUT.\n"));
101 break;
102 case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
103 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
104 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
105 "92C_A_CUT.\n"));
106 break;
107 case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
108 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
109 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
110 "_88C_A_CUT.\n"));
111 break;
112 case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
113 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
114 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
115 "_92C_1T2R_B_CUT.\n"));
116 break;
117 case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
118 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
119 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
120 "_92C_B_CUT.\n"));
121 break;
122 case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
123 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
124 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
125 "_88C_B_CUT.\n"));
126 break;
127 case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
128 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
129 ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
130 "_8723_1T1R_A_CUT.\n"));
131 break;
132 case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
133 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
134 ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
135 "_8723_1T1R_B_CUT.\n"));
136 break;
137 case VERSION_TEST_CHIP_92C:
138 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
139 ("Chip Version ID: VERSION_TEST_CHIP_92C.\n"));
140 break;
141 case VERSION_TEST_CHIP_88C:
142 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
143 ("Chip Version ID: VERSION_TEST_CHIP_88C.\n"));
144 break;
145 default:
146 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
147 ("Chip Version ID: ???????????????.\n"));
148 break;
149 }
150 if (IS_92C_SERIAL(rtlhal->version))
151 rtlphy->rf_type =
152 (IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
153 else
154 rtlphy->rf_type = RF_1T1R;
155 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
156 ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
157 "RF_2T2R" : "RF_1T1R"));
158 if (get_rf_type(rtlphy) == RF_1T1R)
159 rtlpriv->dm.rfpath_rxenable[0] = true;
160 else
161 rtlpriv->dm.rfpath_rxenable[0] =
162 rtlpriv->dm.rfpath_rxenable[1] = true;
163 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
164 rtlhal->version));
165}
166
/**
 * rtl92c_llt_write - LLT table write access
 * @hw: mac80211 hardware context
 * @address: LLT logical address.
 * @data: LLT data content
 *
 * Realtek hardware access function: issues one write through
 * REG_LLT_INIT and busy-polls until the op field returns to
 * _LLT_NO_ACTIVE or POLLING_LLT_THRESHOLD iterations elapse.
 *
 * Returns true on success, false on polling timeout.
 */
bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	bool status = true;
	long count = 0;
	u32 value = _LLT_INIT_ADDR(address) |
	    _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);

	rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
	do {
		/* Hardware clears the op field when the write completes. */
		value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
		if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
			break;
		if (count > POLLING_LLT_THRESHOLD) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 ("Failed to polling write LLT done at"
				 " address %d! _LLT_OP_VALUE(%x)\n",
				 address, _LLT_OP_VALUE(value)));
			status = false;
			break;
		}
	} while (++count);	/* ++count is always non-zero here */
	return status;
}
200/**
201 * rtl92c_init_LLT_table - Init LLT table
202 * @io: io callback
203 * @boundary:
204 *
205 * Realtek hardware access function.
206 *
207 */
208bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
209{
210 bool rst = true;
211 u32 i;
212
213 for (i = 0; i < (boundary - 1); i++) {
214 rst = rtl92c_llt_write(hw, i , i + 1);
215 if (true != rst) {
216 printk(KERN_ERR "===> %s #1 fail\n", __func__);
217 return rst;
218 }
219 }
220 /* end of list */
221 rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
222 if (true != rst) {
223 printk(KERN_ERR "===> %s #2 fail\n", __func__);
224 return rst;
225 }
226 /* Make the other pages as ring buffer
227 * This ring buffer is used as beacon buffer if we config this MAC
228 * as two MAC transfer.
229 * Otherwise used as local loopback buffer.
230 */
231 for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
232 rst = rtl92c_llt_write(hw, i, (i + 1));
233 if (true != rst) {
234 printk(KERN_ERR "===> %s #3 fail\n", __func__);
235 return rst;
236 }
237 }
238 /* Let last entry point to the start entry of ring buffer */
239 rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
240 if (true != rst) {
241 printk(KERN_ERR "===> %s #4 fail\n", __func__);
242 return rst;
243 }
244 return rst;
245}
/*
 * rtl92c_set_key - program (or clear) a hardware CAM security entry
 * @hw: mac80211 hardware context
 * @key_index: key slot requested by the caller
 * @p_macaddr: peer MAC address the key belongs to
 * @is_group: true for a group (broadcast/multicast) key
 * @enc_algo: cipher, translated below to the CAM_* constant
 * @is_wepkey: true for WEP default keys (fixed CAM addresses)
 * @clear_all: when true, wipe the first five CAM entries and cached keys
 *             instead of installing anything
 *
 * A zero-length cached key for key_index deletes the CAM entry;
 * otherwise the entry is (re)written.  In ad-hoc mode a group key also
 * installs a pairwise entry for our own address.
 */
void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
		    u8 *p_macaddr, bool is_group, u8 enc_algo,
		    bool is_wepkey, bool clear_all)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *macaddr = p_macaddr;
	u32 entry_id = 0;
	bool is_pairwise = false;
	/* Fixed CAM addresses used for the four WEP default-key slots. */
	static u8 cam_const_addr[4][6] = {
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
	};
	static u8 cam_const_broad[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	if (clear_all) {
		u8 idx = 0;
		u8 cam_offset = 0;
		u8 clear_number = 5;

		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
		for (idx = 0; idx < clear_number; idx++) {
			rtl_cam_mark_invalid(hw, cam_offset + idx);
			rtl_cam_empty_entry(hw, cam_offset + idx);
			/* Also drop the software copy of the key. */
			if (idx < 5) {
				memset(rtlpriv->sec.key_buf[idx], 0,
				       MAX_KEY_LEN);
				rtlpriv->sec.key_len[idx] = 0;
			}
		}
	} else {
		/* Map the generic cipher id to the CAM encoding. */
		switch (enc_algo) {
		case WEP40_ENCRYPTION:
			enc_algo = CAM_WEP40;
			break;
		case WEP104_ENCRYPTION:
			enc_algo = CAM_WEP104;
			break;
		case TKIP_ENCRYPTION:
			enc_algo = CAM_TKIP;
			break;
		case AESCCMP_ENCRYPTION:
			enc_algo = CAM_AES;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 ("iillegal switch case\n"));
			enc_algo = CAM_TKIP;
			break;
		}
		/* Pick the CAM entry id and the address it is keyed on. */
		if (is_wepkey || rtlpriv->sec.use_defaultkey) {
			macaddr = cam_const_addr[key_index];
			entry_id = key_index;
		} else {
			if (is_group) {
				macaddr = cam_const_broad;
				entry_id = key_index;
			} else {
				key_index = PAIRWISE_KEYIDX;
				entry_id = CAM_PAIRWISE_KEY_POSITION;
				is_pairwise = true;
			}
		}
		if (rtlpriv->sec.key_len[key_index] == 0) {
			/* Empty cached key means "remove this entry". */
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 ("delete one entry\n"));
			rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
		} else {
			RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
				 ("The insert KEY length is %d\n",
				  rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
			RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
				 ("The insert KEY is %x %x\n",
				  rtlpriv->sec.key_buf[0][0],
				  rtlpriv->sec.key_buf[0][1]));
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 ("add one entry\n"));
			if (is_pairwise) {
				RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
					      "Pairwiase Key content :",
					      rtlpriv->sec.pairwise_key,
					      rtlpriv->sec.
					      key_len[PAIRWISE_KEYIDX]);
				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
					 ("set Pairwiase key\n"));

				rtl_cam_add_one_entry(hw, macaddr, key_index,
						      entry_id, enc_algo,
						      CAM_CONFIG_NO_USEDK,
						      rtlpriv->sec.
						      key_buf[key_index]);
			} else {
				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
					 ("set group key\n"));
				/* Ad-hoc: our own pairwise entry reuses
				 * the group key material. */
				if (mac->opmode == NL80211_IFTYPE_ADHOC) {
					rtl_cam_add_one_entry(hw,
						rtlefuse->dev_addr,
						PAIRWISE_KEYIDX,
						CAM_PAIRWISE_KEY_POSITION,
						enc_algo,
						CAM_CONFIG_NO_USEDK,
						rtlpriv->sec.key_buf
						[entry_id]);
				}
				rtl_cam_add_one_entry(hw, macaddr, key_index,
						entry_id, enc_algo,
						CAM_CONFIG_NO_USEDK,
						rtlpriv->sec.key_buf[entry_id]);
			}
		}
	}
}
363
/* Read back the raw TXDMA status register from the hardware. */
u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	return rtl_read_dword(rtlpriv, REG_TXDMA_STATUS);
}
370
/*
 * Enable HW interrupts: program HIMR/HIMRE from the cached irq masks in
 * the PCI or USB private data, depending on the interface type, and
 * record that interrupts are enabled.
 * NOTE(review): the "& 0xFFFFFFFF" is a no-op if irq_mask[] is already
 * 32-bit -- confirm the field width before simplifying.
 */
void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
		rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] &
				0xFFFFFFFF);
		rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
				0xFFFFFFFF);
		rtlpci->irq_enabled = true;
	} else {
		rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
				0xFFFFFFFF);
		rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
				0xFFFFFFFF);
		rtlusb->irq_enabled = true;
	}
}
392
/* Initial interrupt setup is just enabling them from the cached masks. */
void rtl92c_init_interrupt(struct ieee80211_hw *hw)
{
	rtl92c_enable_interrupt(hw);
}
397
/*
 * Mask all HW interrupts (HIMR/HIMRE <- IMR8190_DISABLED) and clear the
 * enabled flag in whichever bus-private structure matches the device.
 */
void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
	rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
	if (IS_HARDWARE_TYPE_8192CE(rtlhal))
		rtlpci->irq_enabled = false;
	else if (IS_HARDWARE_TYPE_8192CU(rtlhal))
		rtlusb->irq_enabled = false;
}
412
/*
 * rtl92c_set_qos - program one access category's EDCA parameter register
 * @hw: mac80211 hardware context
 * @aci: access category index (AC0_BE..AC3_VO)
 *
 * Packs AIFS, ECWmin, ECWmax and TXOP for the given AC into a single
 * 32-bit EDCA parameter word and writes it to the matching register.
 * Also (re)runs the EDCA-turbo heuristics first.
 */
void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u32 u4b_ac_param;

	rtl92c_dm_init_edca_turbo(hw);
	/* Layout: AIFS | ECWmin | ECWmax | TXOP, per the AC_PARAM_* offsets. */
	u4b_ac_param = (u32) mac->ac[aci].aifs;
	u4b_ac_param |=
	    ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
	    AC_PARAM_ECW_MIN_OFFSET;
	u4b_ac_param |=
	    ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
	    AC_PARAM_ECW_MAX_OFFSET;
	u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
	    AC_PARAM_TXOP_OFFSET;
	RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD,
		 ("queue:%x, ac_param:%x\n", aci, u4b_ac_param));
	switch (aci) {
	case AC1_BK:
		rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
		break;
	case AC0_BE:
		rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
		break;
	case AC2_VI:
		rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
		break;
	case AC3_VO:
		rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
		break;
	default:
		RT_ASSERT(false, ("invalid aci: %d !\n", aci));
		break;
	}
}
449
450/*-------------------------------------------------------------------------
451 * HW MAC Address
452 *-------------------------------------------------------------------------*/
/*
 * Write @addr into the six consecutive REG_MACID byte registers, then read
 * them back purely for the debug trace.
 */
void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
{
	u32 i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	for (i = 0 ; i < ETH_ALEN ; i++)
		rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));

	RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-"
						"%02X-%02X-%02X\n",
						rtl_read_byte(rtlpriv, REG_MACID),
						rtl_read_byte(rtlpriv, REG_MACID+1),
						rtl_read_byte(rtlpriv, REG_MACID+2),
						rtl_read_byte(rtlpriv, REG_MACID+3),
						rtl_read_byte(rtlpriv, REG_MACID+4),
						rtl_read_byte(rtlpriv, REG_MACID+5)));
}
470
471void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
472{
473 struct rtl_priv *rtlpriv = rtl_priv(hw);
474 rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
475}
476
/*
 * Program the network-type field at REG_CR+2 for the given interface type.
 *
 * Returns 0 on success, -EOPNOTSUPP for interface types with no hardware
 * mapping.
 */
int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
{
	u8 value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	switch (type) {
	case NL80211_IFTYPE_UNSPECIFIED:
		value = NT_NO_LINK;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Set Network type to NO LINK!\n"));
		break;
	case NL80211_IFTYPE_ADHOC:
		value = NT_LINK_AD_HOC;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Set Network type to Ad Hoc!\n"));
		break;
	case NL80211_IFTYPE_STATION:
		value = NT_LINK_AP;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Set Network type to STA!\n"));
		break;
	case NL80211_IFTYPE_AP:
		value = NT_AS_AP;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Set Network type to AP!\n"));
		break;
	default:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Network type %d not support!\n", type));
		return -EOPNOTSUPP;
	}
	rtl_write_byte(rtlpriv, (REG_CR + 2), value);
	return 0;
}
511
/* Default network type at init: not associated (NO LINK). */
void rtl92c_init_network_type(struct ieee80211_hw *hw)
{
	rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED);
}
516
/*
 * Initialize the response-rate set (CCK 1M only), the SPEC SIFS timing
 * used for NAV, and the short/long retry limits.
 */
void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw)
{
	u16 value16;
	u32 value32;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Response Rate Set */
	value32 = rtl_read_dword(rtlpriv, REG_RRSR);
	value32 &= ~RATE_BITMAP_ALL;
	value32 |= RATE_RRSR_CCK_ONLY_1M;
	rtl_write_dword(rtlpriv, REG_RRSR, value32);
	/* SIFS (used in NAV) */
	value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
	rtl_write_word(rtlpriv, REG_SPEC_SIFS, value16);
	/* Retry Limit */
	value16 = _LRL(0x30) | _SRL(0x30);
	/* NOTE(review): a 16-bit value is written with a dword write here;
	 * the upper half is zero — confirm REG_RL is safe to write as a
	 * dword. */
	rtl_write_dword(rtlpriv, REG_RL, value16);
}
535
/*
 * Program the auto-rate-fallback retry-count tables (data and response
 * rates).  Values are the vendor defaults; meaning of the packed bytes is
 * not documented in this file.
 */
void rtl92c_init_rate_fallback(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Set Data Auto Rate Fallback Retry Count register. */
	rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000);
	rtl_write_dword(rtlpriv, REG_DARFRC+4, 0x10080404);
	rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201);
	rtl_write_dword(rtlpriv, REG_RARFRC+4, 0x08070605);
}
546
547static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
548 u8 ctx_sifs)
549{
550 struct rtl_priv *rtlpriv = rtl_priv(hw);
551
552 rtl_write_byte(rtlpriv, REG_SIFS_CCK, trx_sifs);
553 rtl_write_byte(rtlpriv, (REG_SIFS_CCK + 1), ctx_sifs);
554}
555
556static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
557 u8 ctx_sifs)
558{
559 struct rtl_priv *rtlpriv = rtl_priv(hw);
560
561 rtl_write_byte(rtlpriv, REG_SIFS_OFDM, trx_sifs);
562 rtl_write_byte(rtlpriv, (REG_SIFS_OFDM + 1), ctx_sifs);
563}
564
/*
 * Write one EDCA parameter register directly from its AIFS/ECW/TXOP
 * components (same packing as rtl92c_set_qos()).
 */
void rtl92c_init_edca_param(struct ieee80211_hw *hw,
		u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
{
	/* sequence: VO, VI, BE, BK ==> the same as 92C hardware design.
	 * reference: enum nl80211_txq_q or ieee80211_set_wmm_default function.
	 */
	u32 value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	value = (u32)aifs;
	value |= ((u32)cw_min & 0xF) << 8;
	value |= ((u32)cw_max & 0xF) << 12;
	value |= (u32)txop << 16;
	/* 92C hardware register sequence is the same as queue number. */
	rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
}
581
/*
 * One-time EDCA/MAC timing bring-up: disable the EDCCA count-down,
 * program SIFS/PIFS timings, protection control and the default per-AC
 * EDCA parameter words.
 */
void rtl92c_init_edca(struct ieee80211_hw *hw)
{
	u16 value16;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* disable EDCCA count down, to reduce collision and retry */
	value16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
	value16 |= DIS_EDCA_CNT_DWN;
	rtl_write_word(rtlpriv, REG_RD_CTRL, value16);
	/* Update SIFS timing (vendor default: pHalData->SifsTime = 0x0e0e0a0a) */
	rtl92c_set_cck_sifs(hw, 0xa, 0xa);
	rtl92c_set_ofdm_sifs(hw, 0xe, 0xe);
	/* Set CCK/OFDM SIFS to be 10us. */
	rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a);
	rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010);
	rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
	rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
	/* TXOP: vendor-default EDCA parameter words per access category */
	rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
	rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
	rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
	rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
	/* PIFS */
	rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
	/* AGGR BREAK TIME Register */
	rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
	rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
	rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02);
	rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02);
}
613
/* Configure AMPDU aggregation length limits and break time. */
void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631);
	rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
	/* init AMPDU aggregation number, tuning for Tx's TP, */
	/* NOTE(review): 0x4CA is a raw, unnamed register address; the
	 * meaning of 0x0708 is not documented in this file. */
	rtl_write_word(rtlpriv, 0x4CA, 0x0708);
}
623
/*
 * Set the beacon max-error register to its maximum tolerance.
 * @infra_mode is currently unused.
 */
void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
}
630
/* Program the RD (RDG) control, NAV-next and response-threshold registers. */
void rtl92c_init_rdg_setting(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF);
	rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
	rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
}
639
640void rtl92c_init_retry_function(struct ieee80211_hw *hw)
641{
642 u8 value8;
643 struct rtl_priv *rtlpriv = rtl_priv(hw);
644
645 value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
646 value8 |= EN_AMPDU_RTY_NEW;
647 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8);
648 /* Set ACK timeout */
649 rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
650}
651
/*
 * Program beacon-related timers.  BCNTCFG differs between normal-chip and
 * other silicon revisions.  The @version argument is unused: the chip
 * version is read from rtlhal->version instead.
 */
void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
				   enum version_8192c version)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);

	rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */
	rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/
	rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
	if (IS_NORMAL_CHIP(rtlhal->version))
		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
	else
		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
}
666
667void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
668{
669 struct rtl_priv *rtlpriv = rtl_priv(hw);
670
671 rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0);
672}
673
674void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
675{
676 struct rtl_priv *rtlpriv = rtl_priv(hw);
677 u8 value = is2T ? MAX_MSS_DENSITY_2T : MAX_MSS_DENSITY_1T;
678
679 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
680}
681
682u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
683{
684 struct rtl_priv *rtlpriv = rtl_priv(hw);
685
686 return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
687}
688
689void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
690{
691 struct rtl_priv *rtlpriv = rtl_priv(hw);
692
693 rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
694}
695
696u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
697{
698 struct rtl_priv *rtlpriv = rtl_priv(hw);
699
700 return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
701}
702
703void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
704{
705 struct rtl_priv *rtlpriv = rtl_priv(hw);
706
707 rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
708}
709
710u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
711{
712 struct rtl_priv *rtlpriv = rtl_priv(hw);
713
714 return rtl_read_word(rtlpriv, REG_RXFLTMAP2);
715}
716
717void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
718{
719 struct rtl_priv *rtlpriv = rtl_priv(hw);
720
721 rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
722}
723/*==============================================================*/
724
725static u8 _rtl92c_query_rxpwrpercentage(char antpower)
726{
727 if ((antpower <= -100) || (antpower >= 20))
728 return 0;
729 else if (antpower >= 0)
730 return 100;
731 else
732 return 100 + antpower;
733}
734
735static u8 _rtl92c_evm_db_to_percentage(char value)
736{
737 char ret_val;
738
739 ret_val = value;
740 if (ret_val >= 0)
741 ret_val = 0;
742 if (ret_val <= -33)
743 ret_val = -33;
744 ret_val = 0 - ret_val;
745 ret_val *= 3;
746 if (ret_val == 99)
747 ret_val = 100;
748 return ret_val;
749}
750
751static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
752 u8 signal_strength_index)
753{
754 long signal_power;
755
756 signal_power = (long)((signal_strength_index + 1) >> 1);
757 signal_power -= 95;
758 return signal_power;
759}
760
/*
 * Piecewise-linear rescaling of a raw signal value onto the driver's
 * reporting scale.  Values outside every band (0 or > 100) pass through
 * unchanged.  @hw is unused.
 */
static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
					 long currsig)
{
	if (currsig >= 61 && currsig <= 100)
		return 90 + ((currsig - 60) / 4);
	if (currsig >= 41 && currsig <= 60)
		return 78 + ((currsig - 40) / 2);
	if (currsig >= 31 && currsig <= 40)
		return 66 + (currsig - 30);
	if (currsig >= 21 && currsig <= 30)
		return 54 + (currsig - 20);
	if (currsig >= 5 && currsig <= 20)
		return 42 + (((currsig - 5) * 2) / 3);
	if (currsig == 4)
		return 36;
	if (currsig == 3)
		return 27;
	if (currsig == 2)
		return 18;
	if (currsig == 1)
		return 9;
	return currsig;
}
788
/*
 * Decode the PHY status report attached to a received frame into @pstats:
 * per-path RSSI, overall PWDB percentage, signal quality / per-stream EVM
 * and the scaled signal strength.  CCK frames carry a different report
 * layout (struct phy_sts_cck_8192s_t) than OFDM/HT frames.
 */
static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
				      struct rtl_stats *pstats,
				      struct rx_desc_92c *pdesc,
				      struct rx_fwinfo_92c *p_drvinfo,
				      bool packet_match_bssid,
				      bool packet_toself,
				      bool packet_beacon)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct phy_sts_cck_8192s_t *cck_buf;
	s8 rx_pwr_all = 0, rx_pwr[4];
	u8 rf_rx_num = 0, evm, pwdb_all;
	u8 i, max_spatial_stream;
	u32 rssi, total_rssi = 0;
	/* Always false here; only feeds the cck_highpwr selection below. */
	bool in_powersavemode = false;
	bool is_cck_rate;

	is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
	pstats->packet_matchbssid = packet_match_bssid;
	pstats->packet_toself = packet_toself;
	pstats->is_cck = is_cck_rate;
	pstats->packet_beacon = packet_beacon;
	/* NOTE(review): duplicate of the assignment two lines up. */
	pstats->is_cck = is_cck_rate;
	pstats->RX_SIGQ[0] = -1;
	pstats->RX_SIGQ[1] = -1;
	if (is_cck_rate) {
		u8 report, cck_highpwr;
		/* CCK frames reuse the driver-info area as a CCK report. */
		cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
		if (!in_powersavemode)
			cck_highpwr = rtlphy->cck_high_power;
		else
			cck_highpwr = false;
		if (!cck_highpwr) {
			/* Gain range encoded in agc_rpt bits 7:6;
			 * power offset in bits 5:1. */
			u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
			report = cck_buf->cck_agc_rpt & 0xc0;
			report = report >> 6;
			switch (report) {
			case 0x3:
				rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
				break;
			case 0x2:
				rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
				break;
			case 0x1:
				rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
				break;
			case 0x0:
				rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
				break;
			}
		} else {
			/* High-power mode: range comes from cfosho[0]
			 * bits 6:5; offset from agc_rpt bits 4:0 doubled. */
			u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
			report = p_drvinfo->cfosho[0] & 0x60;
			report = report >> 5;
			switch (report) {
			case 0x3:
				rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
				break;
			case 0x2:
				rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
				break;
			case 0x1:
				rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
				break;
			case 0x0:
				rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
				break;
			}
		}
		pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
		pstats->rx_pwdb_all = pwdb_all;
		pstats->recvsignalpower = rx_pwr_all;
		if (packet_match_bssid) {
			u8 sq;
			/* Map the CCK SQ report onto 0-100. */
			if (pstats->rx_pwdb_all > 40)
				sq = 100;
			else {
				sq = cck_buf->sq_rpt;
				if (sq > 64)
					sq = 0;
				else if (sq < 20)
					sq = 100;
				else
					sq = ((64 - sq) * 100) / 44;
			}
			pstats->signalquality = sq;
			pstats->RX_SIGQ[0] = sq;
			pstats->RX_SIGQ[1] = -1;
		}
	} else {
		rtlpriv->dm.rfpath_rxenable[0] =
		    rtlpriv->dm.rfpath_rxenable[1] = true;
		/* Per-path power: (gain & 0x3f) * 2 - 110 dBm. */
		for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
			if (rtlpriv->dm.rfpath_rxenable[i])
				rf_rx_num++;
			rx_pwr[i] =
			    ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
			rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
			total_rssi += rssi;
			rtlpriv->stats.rx_snr_db[i] =
			    (long)(p_drvinfo->rxsnr[i] / 2);

			if (packet_match_bssid)
				pstats->rx_mimo_signalstrength[i] = (u8) rssi;
		}
		rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
		pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
		pstats->rx_pwdb_all = pwdb_all;
		pstats->rxpower = rx_pwr_all;
		pstats->recvsignalpower = rx_pwr_all;
		/* Two spatial streams only for MCS8-15. */
		if (GET_RX_DESC_RX_MCS(pdesc) &&
		    GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 &&
		    GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15)
			max_spatial_stream = 2;
		else
			max_spatial_stream = 1;
		for (i = 0; i < max_spatial_stream; i++) {
			evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
			if (packet_match_bssid) {
				/* Stream 0's EVM doubles as the overall
				 * signal quality. */
				if (i == 0)
					pstats->signalquality =
					    (u8) (evm & 0xff);
				pstats->RX_SIGQ[i] =
				    (u8) (evm & 0xff);
			}
		}
	}
	/* Final UI value: scale either the CCK PWDB or the per-path mean. */
	if (is_cck_rate)
		pstats->signalstrength =
		    (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
	else if (rf_rx_num != 0)
		pstats->signalstrength =
		    (u8) (_rtl92c_signal_scale_mapping
			  (hw, total_rssi /= rf_rx_num));
}
925
/*
 * Fold the frame's signal strength into the UI RSSI sliding window
 * (PHY_RSSI_SLID_WIN_MAX samples) and, for OFDM/HT frames addressed to
 * us, update the per-RF-path smoothed RSSI percentage using
 * RX_SMOOTH_FACTOR exponential averaging.
 */
static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
				    struct rtl_stats *pstats)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u8 rfpath;
	u32 last_rssi, tmpval;

	if (pstats->packet_toself || pstats->packet_beacon) {
		rtlpriv->stats.rssi_calculate_cnt++;
		/* Once the window is full, subtract the slot about to be
		 * overwritten from the running total. */
		if (rtlpriv->stats.ui_rssi.total_num++ >=
		    PHY_RSSI_SLID_WIN_MAX) {
			rtlpriv->stats.ui_rssi.total_num =
			    PHY_RSSI_SLID_WIN_MAX;
			last_rssi =
			    rtlpriv->stats.ui_rssi.elements[rtlpriv->
							stats.ui_rssi.index];
			rtlpriv->stats.ui_rssi.total_val -= last_rssi;
		}
		rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
		rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
					index++] = pstats->signalstrength;
		if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
			rtlpriv->stats.ui_rssi.index = 0;
		/* Window average, converted to dBm for the UI. */
		tmpval = rtlpriv->stats.ui_rssi.total_val /
		    rtlpriv->stats.ui_rssi.total_num;
		rtlpriv->stats.signal_strength =
		    _rtl92c_translate_todbm(hw, (u8) tmpval);
		pstats->rssi = rtlpriv->stats.signal_strength;
	}
	if (!pstats->is_cck && pstats->packet_toself) {
		for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
		     rfpath++) {
			if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
				continue;
			/* Seed the smoothed value with the first sample. */
			if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
				rtlpriv->stats.rx_rssi_percentage[rfpath] =
				    pstats->rx_mimo_signalstrength[rfpath];
			}
			/* Exponential smoothing; round up by one when the
			 * signal is rising. */
			if (pstats->rx_mimo_signalstrength[rfpath] >
			    rtlpriv->stats.rx_rssi_percentage[rfpath]) {
				rtlpriv->stats.rx_rssi_percentage[rfpath] =
				    ((rtlpriv->stats.
				      rx_rssi_percentage[rfpath] *
				      (RX_SMOOTH_FACTOR - 1)) +
				     (pstats->rx_mimo_signalstrength[rfpath])) /
				    (RX_SMOOTH_FACTOR);

				rtlpriv->stats.rx_rssi_percentage[rfpath] =
				    rtlpriv->stats.rx_rssi_percentage[rfpath] +
				    1;
			} else {
				rtlpriv->stats.rx_rssi_percentage[rfpath] =
				    ((rtlpriv->stats.
				      rx_rssi_percentage[rfpath] *
				      (RX_SMOOTH_FACTOR - 1)) +
				     (pstats->rx_mimo_signalstrength[rfpath])) /
				    (RX_SMOOTH_FACTOR);
			}
		}
	}
}
988
989static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
990 struct rtl_stats *pstats)
991{
992 struct rtl_priv *rtlpriv = rtl_priv(hw);
993 int weighting = 0;
994
995 if (rtlpriv->stats.recv_signal_power == 0)
996 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
997 if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
998 weighting = 5;
999 else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
1000 weighting = (-5);
1001 rtlpriv->stats.recv_signal_power =
1002 (rtlpriv->stats.recv_signal_power * 5 +
1003 pstats->recvsignalpower + weighting) / 6;
1004}
1005
/*
 * Maintain the exponentially smoothed PWDB value in rtlpriv->dm and feed
 * the receive-power statistics.  Ad-hoc mode returns early — presumably
 * its PWDB is tracked elsewhere; TODO confirm against the ad-hoc path.
 */
static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
				 struct rtl_stats *pstats)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undecorated_smoothed_pwdb = 0;

	if (mac->opmode == NL80211_IFTYPE_ADHOC) {
		return;
	} else {
		undecorated_smoothed_pwdb =
		    rtlpriv->dm.undecorated_smoothed_pwdb;
	}
	if (pstats->packet_toself || pstats->packet_beacon) {
		/* A negative stored value means "no sample yet". */
		if (undecorated_smoothed_pwdb < 0)
			undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
		/* Exponential smoothing; round up by one when rising. */
		if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
			undecorated_smoothed_pwdb =
			    (((undecorated_smoothed_pwdb) *
			      (RX_SMOOTH_FACTOR - 1)) +
			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
			undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
			    + 1;
		} else {
			undecorated_smoothed_pwdb =
			    (((undecorated_smoothed_pwdb) *
			      (RX_SMOOTH_FACTOR - 1)) +
			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
		}
		rtlpriv->dm.undecorated_smoothed_pwdb =
		    undecorated_smoothed_pwdb;
		_rtl92c_update_rxsignalstatistics(hw, pstats);
	}
}
1040
/*
 * Maintain the link-quality sliding window (PHY_LINKQUALITY_SLID_WIN_MAX
 * samples of signal quality) and the RX_SMOOTH_FACTOR-smoothed per-stream
 * EVM values.  Frames with zero signal quality are ignored.
 */
static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
				   struct rtl_stats *pstats)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 last_evm = 0, n_stream, tmpval;

	if (pstats->signalquality != 0) {
		if (pstats->packet_toself || pstats->packet_beacon) {
			/* Once the window is full, drop the slot being
			 * replaced from the running total. */
			if (rtlpriv->stats.LINK_Q.total_num++ >=
			    PHY_LINKQUALITY_SLID_WIN_MAX) {
				rtlpriv->stats.LINK_Q.total_num =
				    PHY_LINKQUALITY_SLID_WIN_MAX;
				last_evm =
				    rtlpriv->stats.LINK_Q.elements
				    [rtlpriv->stats.LINK_Q.index];
				rtlpriv->stats.LINK_Q.total_val -=
				    last_evm;
			}
			rtlpriv->stats.LINK_Q.total_val +=
			    pstats->signalquality;
			rtlpriv->stats.LINK_Q.elements
			    [rtlpriv->stats.LINK_Q.index++] =
			    pstats->signalquality;
			if (rtlpriv->stats.LINK_Q.index >=
			    PHY_LINKQUALITY_SLID_WIN_MAX)
				rtlpriv->stats.LINK_Q.index = 0;
			/* Window average is the reported link quality. */
			tmpval = rtlpriv->stats.LINK_Q.total_val /
			    rtlpriv->stats.LINK_Q.total_num;
			rtlpriv->stats.signal_quality = tmpval;
			rtlpriv->stats.last_sigstrength_inpercent = tmpval;
			/* Smooth per-stream EVM; -1 marks "no report". */
			for (n_stream = 0; n_stream < 2;
			     n_stream++) {
				if (pstats->RX_SIGQ[n_stream] != -1) {
					if (!rtlpriv->stats.RX_EVM[n_stream]) {
						rtlpriv->stats.RX_EVM[n_stream]
						 = pstats->RX_SIGQ[n_stream];
					}
					rtlpriv->stats.RX_EVM[n_stream] =
					    ((rtlpriv->stats.RX_EVM
					      [n_stream] *
					      (RX_SMOOTH_FACTOR - 1)) +
					     (pstats->RX_SIGQ
					      [n_stream] * 1)) /
					    (RX_SMOOTH_FACTOR);
				}
			}
		}
	} else {
		;	/* NOTE(review): intentionally empty branch */
	}
}
1092
1093static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
1094 u8 *buffer,
1095 struct rtl_stats *pcurrent_stats)
1096{
1097 if (!pcurrent_stats->packet_matchbssid &&
1098 !pcurrent_stats->packet_beacon)
1099 return;
1100 _rtl92c_process_ui_rssi(hw, pcurrent_stats);
1101 _rtl92c_process_pwdb(hw, pcurrent_stats);
1102 _rtl92c_process_LINK_Q(hw, pcurrent_stats);
1103}
1104
1105void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
1106 struct sk_buff *skb,
1107 struct rtl_stats *pstats,
1108 struct rx_desc_92c *pdesc,
1109 struct rx_fwinfo_92c *p_drvinfo)
1110{
1111 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1112 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1113 struct ieee80211_hdr *hdr;
1114 u8 *tmp_buf;
1115 u8 *praddr;
1116 u8 *psaddr;
1117 __le16 fc;
1118 u16 type, cpu_fc;
1119 bool packet_matchbssid, packet_toself, packet_beacon;
1120
1121 tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
1122 hdr = (struct ieee80211_hdr *)tmp_buf;
1123 fc = hdr->frame_control;
1124 cpu_fc = le16_to_cpu(fc);
1125 type = WLAN_FC_GET_TYPE(fc);
1126 praddr = hdr->addr1;
1127 psaddr = hdr->addr2;
1128 packet_matchbssid =
1129 ((IEEE80211_FTYPE_CTL != type) &&
1130 (!compare_ether_addr(mac->bssid,
1131 (cpu_fc & IEEE80211_FCTL_TODS) ?
1132 hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ?
1133 hdr->addr2 : hdr->addr3)) &&
1134 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
1135
1136 packet_toself = packet_matchbssid &&
1137 (!compare_ether_addr(praddr, rtlefuse->dev_addr));
1138 if (ieee80211_is_beacon(fc))
1139 packet_beacon = true;
1140 _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
1141 packet_matchbssid, packet_toself,
1142 packet_beacon);
1143 _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
1144}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
new file mode 100644
index 000000000000..298fdb724aa5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -0,0 +1,180 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_MAC_H__
31#define __RTL92C_MAC_H__
32
33#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
34#define DRIVER_EARLY_INT_TIME 0x05
35#define BCN_DMA_ATIME_INT_TIME 0x02
36
37void rtl92c_read_chip_version(struct ieee80211_hw *hw);
38bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
39bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary);
40void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
41 u8 *p_macaddr, bool is_group, u8 enc_algo,
42 bool is_wepkey, bool clear_all);
43void rtl92c_enable_interrupt(struct ieee80211_hw *hw);
44void rtl92c_disable_interrupt(struct ieee80211_hw *hw);
45void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
46
47
48/*---------------------------------------------------------------
49 * Hardware init functions
50 *---------------------------------------------------------------*/
51void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
52void rtl92c_init_interrupt(struct ieee80211_hw *hw);
53void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
54
55int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
56void rtl92c_init_network_type(struct ieee80211_hw *hw);
57void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw);
58void rtl92c_init_rate_fallback(struct ieee80211_hw *hw);
59
60void rtl92c_init_edca_param(struct ieee80211_hw *hw,
61 u16 queue,
62 u16 txop,
63 u8 ecwmax,
64 u8 ecwmin,
65 u8 aifs);
66
67void rtl92c_init_edca(struct ieee80211_hw *hw);
68void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
69void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
70void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
71void rtl92c_init_retry_function(struct ieee80211_hw *hw);
72
73void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
74 enum version_8192c version);
75
76void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
77void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
78
79/* For filter */
80u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
81void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
82u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
83void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
84u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
85void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
86
87
88u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
89
/* True when the RX descriptor's rate field is one of the four CCK rates. */
#define RX_HAL_IS_CCK_RATE(_pdesc)\
	(GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE1M ||\
	 GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE2M ||\
	 GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE5_5M ||\
	 GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE11M)
95
/*
 * PHY status ("driver info") block the hardware attaches to received
 * frames; layout must match the chip's report format, hence __packed.
 * Field notes reflect how mac.c consumes them.
 */
struct rx_fwinfo_92c {
	u8 gain_trsw[4];	/* per-RF-path gain; low 6 bits used for
				 * rx power: (g & 0x3f) * 2 - 110 dBm */
	u8 pwdb_all;		/* combined power: ((v >> 1) & 0x7f) - 110 */
	u8 cfosho[4];		/* [0] bits 6:5 select CCK high-power range */
	u8 cfotail[4];
	char rxevm[2];		/* per-stream EVM in dB (negative) */
	char rxsnr[4];		/* per-path SNR, reported value / 2 = dB */
	u8 pdsnr[2];
	u8 csi_current[2];
	u8 csi_target[2];
	u8 sigevm;
	u8 max_ex_pwr;
	u8 ex_intf_flag:1;
	u8 sgi_en:1;
	u8 rxsc:2;
	u8 reserve:4;
} __packed;
113
/*
 * Hardware RX descriptor (8 x 32-bit words).  Bitfield layout must match
 * the chip's descriptor format exactly, hence __packed; do not reorder.
 */
struct rx_desc_92c {
	u32 length:14;		/* word 0: frame length and status bits */
	u32 crc32:1;
	u32 icverror:1;
	u32 drv_infosize:4;
	u32 security:3;
	u32 qos:1;
	u32 shift:2;
	u32 phystatus:1;	/* set when a rx_fwinfo block is present */
	u32 swdec:1;
	u32 lastseg:1;
	u32 firstseg:1;
	u32 eor:1;
	u32 own:1;		/* descriptor ownership (HW vs. driver) */
	u32 macid:5; /* word 1 */
	u32 tid:4;
	u32 hwrsvd:5;
	u32 paggr:1;
	u32 faggr:1;
	u32 a1_fit:4;
	u32 a2_fit:4;
	u32 pam:1;
	u32 pwr:1;
	u32 moredata:1;
	u32 morefrag:1;
	u32 type:2;
	u32 mc:1;
	u32 bc:1;
	u32 seq:12; /* word 2 */
	u32 frag:4;
	u32 nextpktlen:14;
	u32 nextind:1;
	u32 rsvd:1;
	u32 rxmcs:6; /* word 3 */
	u32 rxht:1;
	u32 amsdu:1;
	u32 splcp:1;
	u32 bandwidth:1;
	u32 htc:1;
	u32 tcpchk_rpt:1;
	u32 ipcchk_rpt:1;
	u32 tcpchk_valid:1;
	u32 hwpcerr:1;
	u32 hwpcind:1;
	u32 iv0:16;
	u32 iv1; /* word 4 */
	u32 tsfl; /* word 5 */
	u32 bufferaddress; /* word 6 */
	u32 bufferaddress64; /* word 7 */
} __packed;
164
165enum rtl_desc_qsel rtl92c_map_hwqueue_to_fwqueue(u16 fc,
166 unsigned int
167 skb_queue);
168void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
169 struct sk_buff *skb,
170 struct rtl_stats *pstats,
171 struct rx_desc_92c *pdesc,
172 struct rx_fwinfo_92c *p_drvinfo);
173
174/*---------------------------------------------------------------
175 * Card disable functions
176 *---------------------------------------------------------------*/
177
178
179
180#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
new file mode 100644
index 000000000000..4e020e654e6b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -0,0 +1,607 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../ps.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "rf.h"
37#include "dm.h"
38#include "table.h"
39
40u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
41 enum radio_path rfpath, u32 regaddr, u32 bitmask)
42{
43 struct rtl_priv *rtlpriv = rtl_priv(hw);
44 u32 original_value, readback_value, bitshift;
45 struct rtl_phy *rtlphy = &(rtlpriv->phy);
46
47 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
48 "rfpath(%#x), bitmask(%#x)\n",
49 regaddr, rfpath, bitmask));
50 if (rtlphy->rf_mode != RF_OP_BY_FW) {
51 original_value = _rtl92c_phy_rf_serial_read(hw,
52 rfpath, regaddr);
53 } else {
54 original_value = _rtl92c_phy_fw_rf_serial_read(hw,
55 rfpath, regaddr);
56 }
57 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
58 readback_value = (original_value & bitmask) >> bitshift;
59 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
60 ("regaddr(%#x), rfpath(%#x), "
61 "bitmask(%#x), original_value(%#x)\n",
62 regaddr, rfpath, bitmask, original_value));
63 return readback_value;
64}
65
66void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
67 enum radio_path rfpath,
68 u32 regaddr, u32 bitmask, u32 data)
69{
70 struct rtl_priv *rtlpriv = rtl_priv(hw);
71 struct rtl_phy *rtlphy = &(rtlpriv->phy);
72 u32 original_value, bitshift;
73
74 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
75 ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
76 regaddr, bitmask, data, rfpath));
77 if (rtlphy->rf_mode != RF_OP_BY_FW) {
78 if (bitmask != RFREG_OFFSET_MASK) {
79 original_value = _rtl92c_phy_rf_serial_read(hw,
80 rfpath,
81 regaddr);
82 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
83 data =
84 ((original_value & (~bitmask)) |
85 (data << bitshift));
86 }
87 _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data);
88 } else {
89 if (bitmask != RFREG_OFFSET_MASK) {
90 original_value = _rtl92c_phy_fw_rf_serial_read(hw,
91 rfpath,
92 regaddr);
93 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
94 data =
95 ((original_value & (~bitmask)) |
96 (data << bitshift));
97 }
98 _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
99 }
100 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
101 "bitmask(%#x), data(%#x), rfpath(%#x)\n",
102 regaddr, bitmask, data, rfpath));
103}
104
105bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
106{
107 bool rtstatus;
108 struct rtl_priv *rtlpriv = rtl_priv(hw);
109 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
110 bool is92c = IS_92C_SERIAL(rtlhal->version);
111
112 rtstatus = _rtl92cu_phy_config_mac_with_headerfile(hw);
113 if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal))
114 rtl_write_byte(rtlpriv, 0x14, 0x71);
115 return rtstatus;
116}
117
/*
 * Baseband initialisation: set up the BB/RF register definition tables,
 * enable the required function blocks (PLL, RF, BB — the exact bits
 * differ between the PCIe CE part and the USB CU part), then load the
 * baseband parameter tables.  Returns the parameter-file status.
 * The register write sequence below is order-sensitive.
 */
bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
{
	bool rtstatus = true;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u16 regval;
	/* always 1 here; kept for parity with the vendor code flow */
	u8 b_reg_hwparafile = 1;

	_rtl92c_phy_init_bb_rf_register_definition(hw);
	regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
	rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) |
		       BIT(0) | BIT(1));
	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
	rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
	if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
			       FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB);
	} else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
			       FEN_BB_GLB_RSTn | FEN_BBRSTB);
		rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
	}
	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
	if (b_reg_hwparafile == 1)
		rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
	return rtstatus;
}
146
147bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
148{
149 struct rtl_priv *rtlpriv = rtl_priv(hw);
150 struct rtl_phy *rtlphy = &(rtlpriv->phy);
151 u32 i;
152 u32 arraylength;
153 u32 *ptrarray;
154
155 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
156 arraylength = rtlphy->hwparam_tables[MAC_REG].length ;
157 ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
158 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
159 ("Img:RTL8192CEMAC_2T_ARRAY\n"));
160 for (i = 0; i < arraylength; i = i + 2)
161 rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
162 return true;
163}
164
/*
 * Load either the PHY register table or the AGC table (selected by
 * @configtype) into the baseband, choosing the 2T or 1T variant of the
 * tables based on the chip version.  Table entries are (address, value)
 * pairs; special addresses 0xfe..0xf9 encode delays.
 * Always reports success.
 */
bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
					    u8 configtype)
{
	int i;
	u32 *phy_regarray_table;
	u32 *agctab_array_table;
	u16 phy_reg_arraylen, agctab_arraylen;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	/* 2T tables for 92C-series chips, 1T tables otherwise */
	if (IS_92C_SERIAL(rtlhal->version)) {
		agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_2T].length;
		agctab_array_table = rtlphy->hwparam_tables[AGCTAB_2T].pdata;
		phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_2T].length;
		phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_2T].pdata;
	} else {
		agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_1T].length;
		agctab_array_table = rtlphy->hwparam_tables[AGCTAB_1T].pdata;
		phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_1T].length;
		phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_1T].pdata;
	}
	if (configtype == BASEBAND_CONFIG_PHY_REG) {
		for (i = 0; i < phy_reg_arraylen; i = i + 2) {
			/* 0xfe..0xf9 are delay markers, not registers */
			if (phy_regarray_table[i] == 0xfe)
				mdelay(50);
			else if (phy_regarray_table[i] == 0xfd)
				mdelay(5);
			else if (phy_regarray_table[i] == 0xfc)
				mdelay(1);
			else if (phy_regarray_table[i] == 0xfb)
				udelay(50);
			else if (phy_regarray_table[i] == 0xfa)
				udelay(5);
			else if (phy_regarray_table[i] == 0xf9)
				udelay(1);
			/*
			 * NOTE(review): no 'else' here, so after a delay
			 * marker the marker value is also written as a BB
			 * register address (unlike the RF table loader,
			 * which skips the write) — confirm intended.
			 */
			rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
				      phy_regarray_table[i + 1]);
			udelay(1);
			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
				 ("The phy_regarray_table[0] is %x"
				  " Rtl819XPHY_REGArray[1] is %x\n",
				  phy_regarray_table[i],
				  phy_regarray_table[i + 1]));
		}
	} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
		for (i = 0; i < agctab_arraylen; i = i + 2) {
			rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
				      agctab_array_table[i + 1]);
			udelay(1);
			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
				 ("The agctab_array_table[0] is "
				  "%x Rtl819XPHY_REGArray[1] is %x\n",
				  agctab_array_table[i],
				  agctab_array_table[i + 1]));
		}
	}
	return true;
}
224
/*
 * Load the PHY_REG_PG ("power group") table: triples of
 * (address, bitmask, value) that seed the per-rate TX power offset
 * tables via _rtl92c_store_pwrIndex_diffrate_offset().  Only
 * BASEBAND_CONFIG_PHY_REG is supported; any other @configtype is a
 * traced no-op.  Always reports success.
 */
bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
					      u8 configtype)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	int i;
	u32 *phy_regarray_table_pg;
	u16 phy_regarray_pg_len;

	/* restart power-group bookkeeping before re-parsing the table */
	rtlphy->pwrgroup_cnt = 0;
	phy_regarray_pg_len = rtlphy->hwparam_tables[PHY_REG_PG].length;
	phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
	if (configtype == BASEBAND_CONFIG_PHY_REG) {
		for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
			/* 0xfe..0xf9 are delay markers, not registers */
			if (phy_regarray_table_pg[i] == 0xfe)
				mdelay(50);
			else if (phy_regarray_table_pg[i] == 0xfd)
				mdelay(5);
			else if (phy_regarray_table_pg[i] == 0xfc)
				mdelay(1);
			else if (phy_regarray_table_pg[i] == 0xfb)
				udelay(50);
			else if (phy_regarray_table_pg[i] == 0xfa)
				udelay(5);
			else if (phy_regarray_table_pg[i] == 0xf9)
				udelay(1);
			/*
			 * NOTE(review): no 'else' — delay-marker entries
			 * are also passed to the store helper; confirm the
			 * table format expects that.
			 */
			_rtl92c_store_pwrIndex_diffrate_offset(hw,
						  phy_regarray_table_pg[i],
						  phy_regarray_table_pg[i + 1],
						  phy_regarray_table_pg[i + 2]);
		}
	} else {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
			 ("configtype != BaseBand_Config_PHY_REG\n"));
	}
	return true;
}
262
/*
 * Program one radio path (@rfpath) from the RADIOA/RADIOB header-file
 * tables, choosing the 2T or 1T variant based on the chip version.
 * Table entries are (address, value) pairs; addresses 0xfe..0xf9 encode
 * delays and are NOT written to the RF.  Paths C/D are unsupported on
 * this chip and only logged.  Always reports success.
 */
bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
					   enum radio_path rfpath)
{
	int i;
	u32 *radioa_array_table;
	u32 *radiob_array_table;
	u16 radioa_arraylen, radiob_arraylen;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	if (IS_92C_SERIAL(rtlhal->version)) {
		radioa_arraylen = rtlphy->hwparam_tables[RADIOA_2T].length;
		radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata;
		radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
		radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
		/* NOTE(review): log strings name the CE arrays in the CU
		 * driver — presumably shared tables; harmless either way. */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
	} else {
		radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
		radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
		radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
		radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
	}
	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
	switch (rfpath) {
	case RF90_PATH_A:
		for (i = 0; i < radioa_arraylen; i = i + 2) {
			if (radioa_array_table[i] == 0xfe)
				mdelay(50);
			else if (radioa_array_table[i] == 0xfd)
				mdelay(5);
			else if (radioa_array_table[i] == 0xfc)
				mdelay(1);
			else if (radioa_array_table[i] == 0xfb)
				udelay(50);
			else if (radioa_array_table[i] == 0xfa)
				udelay(5);
			else if (radioa_array_table[i] == 0xf9)
				udelay(1);
			else {
				/* real register entry: write and settle */
				rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
					      RFREG_OFFSET_MASK,
					      radioa_array_table[i + 1]);
				udelay(1);
			}
		}
		break;
	case RF90_PATH_B:
		for (i = 0; i < radiob_arraylen; i = i + 2) {
			if (radiob_array_table[i] == 0xfe) {
				mdelay(50);
			} else if (radiob_array_table[i] == 0xfd)
				mdelay(5);
			else if (radiob_array_table[i] == 0xfc)
				mdelay(1);
			else if (radiob_array_table[i] == 0xfb)
				udelay(50);
			else if (radiob_array_table[i] == 0xfa)
				udelay(5);
			else if (radiob_array_table[i] == 0xf9)
				udelay(1);
			else {
				rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
					      RFREG_OFFSET_MASK,
					      radiob_array_table[i + 1]);
				udelay(1);
			}
		}
		break;
	case RF90_PATH_C:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("switch case not process\n"));
		break;
	case RF90_PATH_D:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("switch case not process\n"));
		break;
	}
	return true;
}
350
351void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
352{
353 struct rtl_priv *rtlpriv = rtl_priv(hw);
354 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
355 struct rtl_phy *rtlphy = &(rtlpriv->phy);
356 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
357 u8 reg_bw_opmode;
358 u8 reg_prsr_rsc;
359
360 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
361 ("Switch to %s bandwidth\n",
362 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
363 "20MHz" : "40MHz"))
364 if (is_hal_stop(rtlhal)) {
365 rtlphy->set_bwmode_inprogress = false;
366 return;
367 }
368 reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
369 reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
370 switch (rtlphy->current_chan_bw) {
371 case HT_CHANNEL_WIDTH_20:
372 reg_bw_opmode |= BW_OPMODE_20MHZ;
373 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
374 break;
375 case HT_CHANNEL_WIDTH_20_40:
376 reg_bw_opmode &= ~BW_OPMODE_20MHZ;
377 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
378 reg_prsr_rsc =
379 (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
380 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
381 break;
382 default:
383 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
384 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
385 break;
386 }
387 switch (rtlphy->current_chan_bw) {
388 case HT_CHANNEL_WIDTH_20:
389 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
390 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
391 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
392 break;
393 case HT_CHANNEL_WIDTH_20_40:
394 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
395 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
396 rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
397 (mac->cur_40_prime_sc >> 1));
398 rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
399 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
400 rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
401 (mac->cur_40_prime_sc ==
402 HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
403 break;
404 default:
405 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
406 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
407 break;
408 }
409 rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
410 rtlphy->set_bwmode_inprogress = false;
411 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
412}
413
/*
 * Enable the CCK and OFDM baseband blocks.  The two register writes are
 * serialized under io.bb_mutex against concurrent BB block switching.
 */
void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	mutex_lock(&rtlpriv->io.bb_mutex);
	rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
	rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
	mutex_unlock(&rtlpriv->io.bb_mutex);
}
423
424void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
425{
426 u8 tmpreg;
427 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
428 struct rtl_priv *rtlpriv = rtl_priv(hw);
429
430 tmpreg = rtl_read_byte(rtlpriv, 0xd03);
431
432 if ((tmpreg & 0x70) != 0)
433 rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
434 else
435 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
436
437 if ((tmpreg & 0x70) != 0) {
438 rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
439 if (is2t)
440 rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
441 MASK12BITS);
442 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
443 (rf_a_mode & 0x8FFFF) | 0x10000);
444 if (is2t)
445 rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
446 (rf_b_mode & 0x8FFFF) | 0x10000);
447 }
448 lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
449 rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
450 mdelay(100);
451 if ((tmpreg & 0x70) != 0) {
452 rtl_write_byte(rtlpriv, 0xd03, tmpreg);
453 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
454 if (is2t)
455 rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
456 rf_b_mode);
457 } else {
458 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
459 }
460}
461
/*
 * Transition the RF into @rfpwr_state (ERFON / ERFOFF / ERFSLEEP).
 *
 * ERFON: if coming out of a HALT_NIC power-off, retry enabling the NIC
 * up to 10 times; otherwise just re-enable RF, then update the LED to
 * match link state.  ERFOFF/ERFSLEEP: wait (bounded by
 * MAX_DOZE_WAITING_TIMES_9x polls of 10 us) for the TX queues to drain
 * before powering down / putting the RF to sleep.
 *
 * Returns true and commits ppsc->rfpwr_state on success; false for an
 * unknown state.  set_rfpowerstate_inprogress brackets the whole
 * transition.
 *
 * NOTE(review): this USB (8192cu) function uses rtl_pcipriv() and
 * pcipriv->dev.tx_ring, and calls rtl92ce_phy_set_rf_on() — both look
 * borrowed from the PCIe (8192ce) driver; confirm they are valid here.
 */
bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
				     enum rf_pwrstate rfpwr_state)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	bool bresult = true;
	u8 i, queue_id;
	struct rtl8192_tx_ring *ring = NULL;

	ppsc->set_rfpowerstate_inprogress = true;
	switch (rfpwr_state) {
	case ERFON:
		if ((ppsc->rfpwr_state == ERFOFF) &&
		    RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
			bool rtstatus;
			u32 InitializeCount = 0;

			/* NIC was fully halted: bring it back, retrying */
			do {
				InitializeCount++;
				RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
					 ("IPS Set eRf nic enable\n"));
				rtstatus = rtl_ps_enable_nic(hw);
			} while ((rtstatus != true)
				 && (InitializeCount < 10));
			RT_CLEAR_PS_LEVEL(ppsc,
					  RT_RF_OFF_LEVL_HALT_NIC);
		} else {
			RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
				 ("Set ERFON sleeped:%d ms\n",
				  jiffies_to_msecs(jiffies -
						   ppsc->
						   last_sleep_jiffies)));
			ppsc->last_awake_jiffies = jiffies;
			rtl92ce_phy_set_rf_on(hw);
		}
		if (mac->link_state == MAC80211_LINKED) {
			rtlpriv->cfg->ops->led_control(hw,
						       LED_CTL_LINK);
		} else {
			rtlpriv->cfg->ops->led_control(hw,
						       LED_CTL_NO_LINK);
		}
		break;
	case ERFOFF:
		/* drain TX queues (beacon queue exempt) before power-off */
		for (queue_id = 0, i = 0;
		     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
			ring = &pcipriv->dev.tx_ring[queue_id];
			if (skb_queue_len(&ring->queue) == 0 ||
			    queue_id == BEACON_QUEUE) {
				queue_id++;
				continue;
			} else {
				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
					 ("eRf Off/Sleep: %d times "
					  "TcbBusyQueue[%d] "
					  "=%d before doze!\n", (i + 1),
					  queue_id,
					  skb_queue_len(&ring->queue)));
				udelay(10);
				i++;
			}
			if (i >= MAX_DOZE_WAITING_TIMES_9x) {
				/* give up waiting; proceed anyway */
				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
					 ("\nERFOFF: %d times "
					  "TcbBusyQueue[%d] = %d !\n",
					  MAX_DOZE_WAITING_TIMES_9x,
					  queue_id,
					  skb_queue_len(&ring->queue)));
				break;
			}
		}
		if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
			RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
				 ("IPS Set eRf nic disable\n"));
			rtl_ps_disable_nic(hw);
			RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
		} else {
			if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
				rtlpriv->cfg->ops->led_control(hw,
							 LED_CTL_NO_LINK);
			} else {
				rtlpriv->cfg->ops->led_control(hw,
							 LED_CTL_POWER_OFF);
			}
		}
		break;
	case ERFSLEEP:
		if (ppsc->rfpwr_state == ERFOFF)
			break;
		/* drain all TX queues before letting the RF doze */
		for (queue_id = 0, i = 0;
		     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
			ring = &pcipriv->dev.tx_ring[queue_id];
			if (skb_queue_len(&ring->queue) == 0) {
				queue_id++;
				continue;
			} else {
				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
					 ("eRf Off/Sleep: %d times "
					  "TcbBusyQueue[%d] =%d before "
					  "doze!\n", (i + 1), queue_id,
					  skb_queue_len(&ring->queue)));
				udelay(10);
				i++;
			}
			if (i >= MAX_DOZE_WAITING_TIMES_9x) {
				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
					 ("\n ERFSLEEP: %d times "
					  "TcbBusyQueue[%d] = %d !\n",
					  MAX_DOZE_WAITING_TIMES_9x,
					  queue_id,
					  skb_queue_len(&ring->queue)));
				break;
			}
		}
		RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
			 ("Set ERFSLEEP awaked:%d ms\n",
			  jiffies_to_msecs(jiffies -
					   ppsc->last_awake_jiffies)));
		ppsc->last_sleep_jiffies = jiffies;
		_rtl92c_phy_set_rf_sleep(hw);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("switch case not process\n"));
		bresult = false;
		break;
	}
	if (bresult)
		ppsc->rfpwr_state = rfpwr_state;
	ppsc->set_rfpowerstate_inprogress = false;
	return bresult;
}
596
597bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
598 enum rf_pwrstate rfpwr_state)
599{
600 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
601 bool bresult = false;
602
603 if (rfpwr_state == ppsc->rfpwr_state)
604 return bresult;
605 bresult = _rtl92cu_phy_set_rf_power_state(hw, rfpwr_state);
606 return bresult;
607}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
new file mode 100644
index 000000000000..06299559ab68
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
@@ -0,0 +1,36 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/phy.h"
31
32void rtl92cu_bb_block_on(struct ieee80211_hw *hw);
33bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
34void rtl92c_phy_set_io(struct ieee80211_hw *hw);
35bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
36bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
new file mode 100644
index 000000000000..7f1be614c998
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
@@ -0,0 +1,30 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
new file mode 100644
index 000000000000..1c79c226f145
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -0,0 +1,493 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "reg.h"
32#include "def.h"
33#include "phy.h"
34#include "rf.h"
35#include "dm.h"
36
37static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
38
39void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
40{
41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 struct rtl_phy *rtlphy = &(rtlpriv->phy);
43
44 switch (bandwidth) {
45 case HT_CHANNEL_WIDTH_20:
46 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
47 0xfffff3ff) | 0x0400);
48 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
49 rtlphy->rfreg_chnlval[0]);
50 break;
51 case HT_CHANNEL_WIDTH_20_40:
52 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
53 0xfffff3ff));
54 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
55 rtlphy->rfreg_chnlval[0]);
56 break;
57 default:
58 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
59 ("unknown bandwidth: %#X\n", bandwidth));
60 break;
61 }
62}
63
64void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
65 u8 *ppowerlevel)
66{
67 struct rtl_priv *rtlpriv = rtl_priv(hw);
68 struct rtl_phy *rtlphy = &(rtlpriv->phy);
69 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
70 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
71 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
72 u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
73 bool turbo_scanoff = false;
74 u8 idx1, idx2;
75 u8 *ptr;
76
77 if (rtlhal->interface == INTF_PCI) {
78 if (rtlefuse->eeprom_regulatory != 0)
79 turbo_scanoff = true;
80 } else {
81 if ((rtlefuse->eeprom_regulatory != 0) ||
82 (rtlefuse->external_pa))
83 turbo_scanoff = true;
84 }
85 if (mac->act_scanning == true) {
86 tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
87 tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
88 if (turbo_scanoff) {
89 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
90 tx_agc[idx1] = ppowerlevel[idx1] |
91 (ppowerlevel[idx1] << 8) |
92 (ppowerlevel[idx1] << 16) |
93 (ppowerlevel[idx1] << 24);
94 if (rtlhal->interface == INTF_USB) {
95 if (tx_agc[idx1] > 0x20 &&
96 rtlefuse->external_pa)
97 tx_agc[idx1] = 0x20;
98 }
99 }
100 }
101 } else {
102 if (rtlpriv->dm.dynamic_txhighpower_lvl ==
103 TXHIGHPWRLEVEL_LEVEL1) {
104 tx_agc[RF90_PATH_A] = 0x10101010;
105 tx_agc[RF90_PATH_B] = 0x10101010;
106 } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
107 TXHIGHPWRLEVEL_LEVEL1) {
108 tx_agc[RF90_PATH_A] = 0x00000000;
109 tx_agc[RF90_PATH_B] = 0x00000000;
110 } else{
111 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
112 tx_agc[idx1] = ppowerlevel[idx1] |
113 (ppowerlevel[idx1] << 8) |
114 (ppowerlevel[idx1] << 16) |
115 (ppowerlevel[idx1] << 24);
116 }
117 if (rtlefuse->eeprom_regulatory == 0) {
118 tmpval = (rtlphy->mcs_txpwrlevel_origoffset
119 [0][6]) +
120 (rtlphy->mcs_txpwrlevel_origoffset
121 [0][7] << 8);
122 tx_agc[RF90_PATH_A] += tmpval;
123 tmpval = (rtlphy->mcs_txpwrlevel_origoffset
124 [0][14]) +
125 (rtlphy->mcs_txpwrlevel_origoffset
126 [0][15] << 24);
127 tx_agc[RF90_PATH_B] += tmpval;
128 }
129 }
130 }
131 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
132 ptr = (u8 *) (&(tx_agc[idx1]));
133 for (idx2 = 0; idx2 < 4; idx2++) {
134 if (*ptr > RF6052_MAX_TX_PWR)
135 *ptr = RF6052_MAX_TX_PWR;
136 ptr++;
137 }
138 }
139 tmpval = tx_agc[RF90_PATH_A] & 0xff;
140 rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
141
142 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
143 ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
144 RTXAGC_A_CCK1_MCS32));
145
146 tmpval = tx_agc[RF90_PATH_A] >> 8;
147 if (mac->mode == WIRELESS_MODE_B)
148 tmpval = tmpval & 0xff00ffff;
149 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
150 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
151 ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
152 RTXAGC_B_CCK11_A_CCK2_11));
153 tmpval = tx_agc[RF90_PATH_B] >> 24;
154 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
155 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
156 ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
157 RTXAGC_B_CCK11_A_CCK2_11));
158 tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
159 rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
160 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
161 ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
162 RTXAGC_B_CCK1_55_MCS32));
163}
164
165static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
166 u8 *ppowerlevel, u8 channel,
167 u32 *ofdmbase, u32 *mcsbase)
168{
169 struct rtl_priv *rtlpriv = rtl_priv(hw);
170 struct rtl_phy *rtlphy = &(rtlpriv->phy);
171 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
172 u32 powerBase0, powerBase1;
173 u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0;
174 u8 i, powerlevel[2];
175
176 for (i = 0; i < 2; i++) {
177 powerlevel[i] = ppowerlevel[i];
178 legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
179 powerBase0 = powerlevel[i] + legacy_pwrdiff;
180 powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
181 (powerBase0 << 8) | powerBase0;
182 *(ofdmbase + i) = powerBase0;
183 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
184 (" [OFDM power base index rf(%c) = 0x%x]\n",
185 ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
186 }
187 for (i = 0; i < 2; i++) {
188 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
189 ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
190 powerlevel[i] += ht20_pwrdiff;
191 }
192 powerBase1 = powerlevel[i];
193 powerBase1 = (powerBase1 << 24) |
194 (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
195 *(mcsbase + i) = powerBase1;
196 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
197 (" [MCS power base index rf(%c) = 0x%x]\n",
198 ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
199 }
200}
201
/*
 * Compute the per-path TX power register value for one OFDM/MCS rate
 * group (@index; rf loop 0/1 = path A/B), following the
 * eeprom_regulatory strategy:
 *   0 - vendor default: stored per-rate offset + power base
 *   1 - Realtek regulatory: per-channel-group offsets + power base
 *   2 - power base only
 *   3 - customer limits from the pwrgroup_ht20/ht40 tables
 * The dynamic-TX-high-power level may then override (LEVEL1/LEVEL2) or
 * reduce (BT1) the value.  Results are written to @p_outwriteval, one
 * u32 per path.
 */
static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
						       u8 channel, u8 index,
						       u32 *powerBase0,
						       u32 *powerBase1,
						       u32 *p_outwriteval)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 i, chnlgroup = 0, pwr_diff_limit[4];
	u32 writeVal, customer_limit, rf;

	for (rf = 0; rf < 2; rf++) {
		switch (rtlefuse->eeprom_regulatory) {
		case 0:
			chnlgroup = 0;
			/* indexes 0-1 are legacy rates (base0), rest HT (base1) */
			writeVal = rtlphy->mcs_txpwrlevel_origoffset
			    [chnlgroup][index + (rf ? 8 : 0)]
			    + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				("RTK better performance,writeVal(%c) = 0x%x\n",
				 ((rf == 0) ? 'A' : 'B'), writeVal));
			break;
		case 1:
			/*
			 * NOTE(review): pwrgroup_cnt == 2 falls through with
			 * chnlgroup left at its previous value (0 on the
			 * first path) — confirm that is intended.
			 */
			if (rtlphy->pwrgroup_cnt == 1)
				chnlgroup = 0;
			if (rtlphy->pwrgroup_cnt >= 3) {
				if (channel <= 3)
					chnlgroup = 0;
				else if (channel >= 4 && channel <= 9)
					chnlgroup = 1;
				else if (channel > 9)
					chnlgroup = 2;
				/* 20 MHz groups sit after group 0; 40 MHz after +4 */
				if (rtlphy->current_chan_bw ==
				    HT_CHANNEL_WIDTH_20)
					chnlgroup++;
				else
					chnlgroup += 4;
			}
			writeVal = rtlphy->mcs_txpwrlevel_origoffset
			    [chnlgroup][index +
					(rf ? 8 : 0)] +
			    ((index < 2) ? powerBase0[rf] :
			     powerBase1[rf]);
			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				("Realtek regulatory, 20MHz, "
				 "writeVal(%c) = 0x%x\n",
				 ((rf == 0) ? 'A' : 'B'), writeVal));
			break;
		case 2:
			writeVal = ((index < 2) ? powerBase0[rf] :
				    powerBase1[rf]);
			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				("Better regulatory,writeVal(%c) = 0x%x\n",
				 ((rf == 0) ? 'A' : 'B'), writeVal));
			break;
		case 3:
			chnlgroup = 0;
			if (rtlphy->current_chan_bw ==
			    HT_CHANNEL_WIDTH_20_40) {
				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					("customer's limit, 40MHzrf(%c) = "
					 "0x%x\n", ((rf == 0) ? 'A' : 'B'),
					 rtlefuse->pwrgroup_ht40[rf]
					 [channel - 1]));
			} else {
				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					("customer's limit, 20MHz rf(%c) = "
					 "0x%x\n", ((rf == 0) ? 'A' : 'B'),
					 rtlefuse->pwrgroup_ht20[rf]
					 [channel - 1]));
			}
			/* clamp each byte of the stored offset to the limit */
			for (i = 0; i < 4; i++) {
				pwr_diff_limit[i] =
				    (u8) ((rtlphy->mcs_txpwrlevel_origoffset
					   [chnlgroup][index + (rf ? 8 : 0)]
					   & (0x7f << (i * 8))) >> (i * 8));
				if (rtlphy->current_chan_bw ==
				    HT_CHANNEL_WIDTH_20_40) {
					if (pwr_diff_limit[i] >
					    rtlefuse->pwrgroup_ht40[rf]
					    [channel - 1])
						pwr_diff_limit[i] = rtlefuse->
						    pwrgroup_ht40[rf]
						    [channel - 1];
				} else {
					if (pwr_diff_limit[i] >
					    rtlefuse->pwrgroup_ht20[rf]
					    [channel - 1])
						pwr_diff_limit[i] =
						    rtlefuse->pwrgroup_ht20[rf]
						    [channel - 1];
				}
			}
			customer_limit = (pwr_diff_limit[3] << 24) |
			    (pwr_diff_limit[2] << 16) |
			    (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				("Customer's limit rf(%c) = 0x%x\n",
				 ((rf == 0) ? 'A' : 'B'), customer_limit));
			writeVal = customer_limit + ((index < 2) ?
						     powerBase0[rf] : powerBase1[rf]);
			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				("Customer, writeVal rf(%c)= 0x%x\n",
				 ((rf == 0) ? 'A' : 'B'), writeVal));
			break;
		default:
			chnlgroup = 0;
			writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
			    [index + (rf ? 8 : 0)] + ((index < 2) ?
						      powerBase0[rf] : powerBase1[rf]);
			RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
							   "performance, writeValrf(%c) = 0x%x\n",
							   ((rf == 0) ? 'A' : 'B'), writeVal));
			break;
		}
		/* dynamic-TX-high-power overrides applied last */
		if (rtlpriv->dm.dynamic_txhighpower_lvl ==
		    TXHIGHPWRLEVEL_LEVEL1)
			writeVal = 0x14141414;
		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
			 TXHIGHPWRLEVEL_LEVEL2)
			writeVal = 0x00000000;
		if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
			writeVal = writeVal - 0x06060606;
		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
			 TXHIGHPWRLEVEL_BT2)
			/* deliberate no-op: BT2 keeps the value unchanged */
			writeVal = writeVal;
		*(p_outwriteval + rf) = writeVal;
	}
}
332
333static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
334 u8 index, u32 *pValue)
335{
336 struct rtl_priv *rtlpriv = rtl_priv(hw);
337 struct rtl_phy *rtlphy = &(rtlpriv->phy);
338 u16 regoffset_a[6] = {
339 RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
340 RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
341 RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
342 };
343 u16 regoffset_b[6] = {
344 RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
345 RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
346 RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
347 };
348 u8 i, rf, pwr_val[4];
349 u32 writeVal;
350 u16 regoffset;
351
352 for (rf = 0; rf < 2; rf++) {
353 writeVal = pValue[rf];
354 for (i = 0; i < 4; i++) {
355 pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >>
356 (i * 8));
357 if (pwr_val[i] > RF6052_MAX_TX_PWR)
358 pwr_val[i] = RF6052_MAX_TX_PWR;
359 }
360 writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
361 (pwr_val[1] << 8) | pwr_val[0];
362 if (rf == 0)
363 regoffset = regoffset_a[index];
364 else
365 regoffset = regoffset_b[index];
366 rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
367 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
368 ("Set 0x%x = %08x\n", regoffset, writeVal));
369 if (((get_rf_type(rtlphy) == RF_2T2R) &&
370 (regoffset == RTXAGC_A_MCS15_MCS12 ||
371 regoffset == RTXAGC_B_MCS15_MCS12)) ||
372 ((get_rf_type(rtlphy) != RF_2T2R) &&
373 (regoffset == RTXAGC_A_MCS07_MCS04 ||
374 regoffset == RTXAGC_B_MCS07_MCS04))) {
375 writeVal = pwr_val[3];
376 if (regoffset == RTXAGC_A_MCS15_MCS12 ||
377 regoffset == RTXAGC_A_MCS07_MCS04)
378 regoffset = 0xc90;
379 if (regoffset == RTXAGC_B_MCS15_MCS12 ||
380 regoffset == RTXAGC_B_MCS07_MCS04)
381 regoffset = 0xc98;
382 for (i = 0; i < 3; i++) {
383 writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
384 rtl_write_byte(rtlpriv, (u32)(regoffset + i),
385 (u8)writeVal);
386 }
387 }
388 }
389}
390
391void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
392 u8 *ppowerlevel, u8 channel)
393{
394 u32 writeVal[2], powerBase0[2], powerBase1[2];
395 u8 index = 0;
396
397 rtl92c_phy_get_power_base(hw, ppowerlevel,
398 channel, &powerBase0[0], &powerBase1[0]);
399 for (index = 0; index < 6; index++) {
400 _rtl92c_get_txpower_writeval_by_regulatory(hw,
401 channel, index,
402 &powerBase0[0],
403 &powerBase1[0],
404 &writeVal[0]);
405 _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
406 }
407}
408
409bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw)
410{
411 struct rtl_priv *rtlpriv = rtl_priv(hw);
412 struct rtl_phy *rtlphy = &(rtlpriv->phy);
413 bool rtstatus = true;
414 u8 b_reg_hwparafile = 1;
415
416 if (rtlphy->rf_type == RF_1T1R)
417 rtlphy->num_total_rfpath = 1;
418 else
419 rtlphy->num_total_rfpath = 2;
420 if (b_reg_hwparafile == 1)
421 rtstatus = _rtl92c_phy_rf6052_config_parafile(hw);
422 return rtstatus;
423}
424
425static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
426{
427 struct rtl_priv *rtlpriv = rtl_priv(hw);
428 struct rtl_phy *rtlphy = &(rtlpriv->phy);
429 u32 u4_regvalue = 0;
430 u8 rfpath;
431 bool rtstatus = true;
432 struct bb_reg_def *pphyreg;
433
434 for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
435 pphyreg = &rtlphy->phyreg_def[rfpath];
436 switch (rfpath) {
437 case RF90_PATH_A:
438 case RF90_PATH_C:
439 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
440 BRFSI_RFENV);
441 break;
442 case RF90_PATH_B:
443 case RF90_PATH_D:
444 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
445 BRFSI_RFENV << 16);
446 break;
447 }
448 rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
449 udelay(1);
450 rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
451 udelay(1);
452 rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
453 B3WIREADDREAALENGTH, 0x0);
454 udelay(1);
455 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
456 udelay(1);
457 switch (rfpath) {
458 case RF90_PATH_A:
459 rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
460 (enum radio_path) rfpath);
461 break;
462 case RF90_PATH_B:
463 rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
464 (enum radio_path) rfpath);
465 break;
466 case RF90_PATH_C:
467 break;
468 case RF90_PATH_D:
469 break;
470 }
471 switch (rfpath) {
472 case RF90_PATH_A:
473 case RF90_PATH_C:
474 rtl_set_bbreg(hw, pphyreg->rfintfs,
475 BRFSI_RFENV, u4_regvalue);
476 break;
477 case RF90_PATH_B:
478 case RF90_PATH_D:
479 rtl_set_bbreg(hw, pphyreg->rfintfs,
480 BRFSI_RFENV << 16, u4_regvalue);
481 break;
482 }
483 if (rtstatus != true) {
484 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
485 ("Radio[%d] Fail!!", rfpath));
486 goto phy_rf_cfg_fail;
487 }
488 }
489 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
490 return rtstatus;
491phy_rf_cfg_fail:
492 return rtstatus;
493}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
new file mode 100644
index 000000000000..86c2728cfa00
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -0,0 +1,47 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
#ifndef __RTL92CU_RF_H__
#define __RTL92CU_RF_H__

/* RF6052 hardware limits used by the TX power code in rf.c. */
#define RF6052_MAX_TX_PWR	0x3F
#define RF6052_MAX_REG		0x3F
#define RF6052_MAX_PATH		2

/* NOTE(review): the next two declarations use the rtl92c_ prefix while
 * rf.c defines rtl92cu_-prefixed variants -- presumably the shared
 * rtl8192c-common implementations; verify against the common code. */
extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
					     u8 bandwidth);
extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
					      u8 *ppowerlevel);
extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
					       u8 *ppowerlevel, u8 channel);
bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
					   enum radio_path rfpath);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
new file mode 100644
index 000000000000..71244a38d49e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -0,0 +1,336 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../core.h"
32#include "../usb.h"
33#include "../efuse.h"
34#include "reg.h"
35#include "def.h"
36#include "phy.h"
37#include "mac.h"
38#include "dm.h"
39#include "rf.h"
40#include "sw.h"
41#include "trx.h"
42#include "led.h"
43#include "hw.h"
44#include <linux/vmalloc.h>
45
46MODULE_AUTHOR("Georgia <georgia@realtek.com>");
47MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
48MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
49MODULE_LICENSE("GPL");
50MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
51MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
52
53static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
54{
55 struct rtl_priv *rtlpriv = rtl_priv(hw);
56
57 rtlpriv->dm.dm_initialgain_enable = 1;
58 rtlpriv->dm.dm_flag = 0;
59 rtlpriv->dm.disable_framebursting = 0;
60 rtlpriv->dm.thermalvalue = 0;
61 rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
62 if (!rtlpriv->rtlhal.pfirmware) {
63 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
64 ("Can't alloc buffer for fw.\n"));
65 return 1;
66 }
67 return 0;
68}
69
70static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
71{
72 struct rtl_priv *rtlpriv = rtl_priv(hw);
73
74 if (rtlpriv->rtlhal.pfirmware) {
75 vfree(rtlpriv->rtlhal.pfirmware);
76 rtlpriv->rtlhal.pfirmware = NULL;
77 }
78}
79
/* HAL operation table wiring the rtlwifi core to the 8192CU-specific
 * (rtl92cu_*) and shared 8192C (rtl92c_*) implementations. */
static struct rtl_hal_ops rtl8192cu_hal_ops = {
	.init_sw_vars = rtl92cu_init_sw_vars,
	.deinit_sw_vars = rtl92cu_deinit_sw_vars,
	.read_chip_version = rtl92c_read_chip_version,
	.read_eeprom_info = rtl92cu_read_eeprom_info,
	.enable_interrupt = rtl92c_enable_interrupt,
	.disable_interrupt = rtl92c_disable_interrupt,
	.hw_init = rtl92cu_hw_init,
	.hw_disable = rtl92cu_card_disable,
	.set_network_type = rtl92cu_set_network_type,
	.set_chk_bssid = rtl92cu_set_check_bssid,
	.set_qos = rtl92c_set_qos,
	.set_bcn_reg = rtl92cu_set_beacon_related_registers,
	.set_bcn_intv = rtl92cu_set_beacon_interval,
	.update_interrupt_mask = rtl92cu_update_interrupt_mask,
	.get_hw_reg = rtl92cu_get_hw_reg,
	.set_hw_reg = rtl92cu_set_hw_reg,
	.update_rate_table = rtl92cu_update_hal_rate_table,
	.update_rate_mask = rtl92cu_update_hal_rate_mask,
	.fill_tx_desc = rtl92cu_tx_fill_desc,
	.fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
	.fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
	.cmd_send_packet = rtl92cu_cmd_send_packet,
	.query_rx_desc = rtl92cu_rx_query_desc,
	.set_channel_access = rtl92cu_update_channel_access_setting,
	.radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking,
	.set_bw_mode = rtl92c_phy_set_bw_mode,
	.switch_channel = rtl92c_phy_sw_chnl,
	.dm_watchdog = rtl92c_dm_watchdog,
	.scan_operation_backup = rtl92c_phy_scan_operation_backup,
	.set_rf_power_state = rtl92cu_phy_set_rf_power_state,
	.led_control = rtl92cu_led_control,
	.enable_hw_sec = rtl92cu_enable_hw_security_config,
	.set_key = rtl92c_set_key,
	.init_sw_leds = rtl92cu_init_sw_leds,
	.deinit_sw_leds = rtl92cu_deinit_sw_leds,
	.get_bbreg = rtl92c_phy_query_bb_reg,
	.set_bbreg = rtl92c_phy_set_bb_reg,
	.get_rfreg = rtl92cu_phy_query_rf_reg,
	.set_rfreg = rtl92cu_phy_set_rf_reg,
	.phy_rf6052_config = rtl92cu_phy_rf6052_config,
	.phy_rf6052_set_cck_txpower = rtl92cu_phy_rf6052_set_cck_txpower,
	.phy_rf6052_set_ofdm_txpower = rtl92cu_phy_rf6052_set_ofdm_txpower,
	.config_bb_with_headerfile = _rtl92cu_phy_config_bb_with_headerfile,
	.config_bb_with_pgheaderfile = _rtl92cu_phy_config_bb_with_pgheaderfile,
	.phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
	.phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
	.dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
};
129
/* Module parameters: hardware crypto is used by default (sw_crypto=0). */
static struct rtl_mod_params rtl92cu_mod_params = {
	.sw_crypto = 0,
};
133
/* USB transport configuration: RX bulk-in endpoint/URB setup and the TX
 * handler callbacks handed to the rtlwifi USB core. */
static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
	/* rx */
	.in_ep_num = RTL92C_USB_BULK_IN_NUM,
	.rx_urb_num = RTL92C_NUM_RX_URBS,
	.rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
	.usb_rx_hdl = rtl8192cu_rx_hdl,
	.usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */
	/* tx */
	.usb_tx_cleanup = rtl8192c_tx_cleanup,
	.usb_tx_post_hdl = rtl8192c_tx_post_hdl,
	.usb_tx_aggregate_hdl = rtl8192c_tx_aggregate_hdl,
	/* endpoint mapping */
	.usb_endpoint_mapping = rtl8192cu_endpoint_mapping,
	.usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
};
149
/* Top-level HAL configuration: identifies the firmware image, the ops and
 * USB tables above, and translates the core's abstract register/bit/rate
 * indices into 8192C-specific values via the .maps[] table. */
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
	.name = "rtl92c_usb",
	.fw_name = "rtlwifi/rtl8192cufw.bin",
	.ops = &rtl8192cu_hal_ops,
	.mod_params = &rtl92cu_mod_params,
	.usb_interface_cfg = &rtl92cu_interface_cfg,

	/* System control and RCR bits */
	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
	.maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
	.maps[SYS_CLK] = REG_SYS_CLKR,
	.maps[MAC_RCR_AM] = AM,
	.maps[MAC_RCR_AB] = AB,
	.maps[MAC_RCR_ACRC32] = ACRC32,
	.maps[MAC_RCR_ACF] = ACF,
	.maps[MAC_RCR_AAP] = AAP,

	/* Efuse access registers and geometry */
	.maps[EFUSE_TEST] = REG_EFUSE_TEST,
	.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
	.maps[EFUSE_CLK] = 0,
	.maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
	.maps[EFUSE_PWC_EV12V] = PWC_EV12V,
	.maps[EFUSE_FEN_ELDR] = FEN_ELDR,
	.maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
	.maps[EFUSE_ANA8M] = EFUSE_ANA8M,
	.maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
	.maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
	.maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,

	/* Security CAM registers and cipher codes */
	.maps[RWCAM] = REG_CAMCMD,
	.maps[WCAMI] = REG_CAMWRITE,
	.maps[RCAMO] = REG_CAMREAD,
	.maps[CAMDBG] = REG_CAMDBG,
	.maps[SECR] = REG_SECCFG,
	.maps[SEC_CAM_NONE] = CAM_NONE,
	.maps[SEC_CAM_WEP40] = CAM_WEP40,
	.maps[SEC_CAM_TKIP] = CAM_TKIP,
	.maps[SEC_CAM_AES] = CAM_AES,
	.maps[SEC_CAM_WEP104] = CAM_WEP104,

	/* Interrupt mask bits */
	.maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
	.maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
	.maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
	.maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
	.maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
	.maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
	.maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
	.maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
	.maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
	.maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
	.maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
	.maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
	.maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
	.maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
	.maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
	.maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,

	.maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
	.maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
	.maps[RTL_IMR_BcnInt] = IMR_BCNINT,
	.maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
	.maps[RTL_IMR_RDU] = IMR_RDU,
	.maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
	.maps[RTL_IMR_BDOK] = IMR_BDOK,
	.maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
	.maps[RTL_IMR_TBDER] = IMR_TBDER,
	.maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
	.maps[RTL_IMR_TBDOK] = IMR_TBDOK,
	.maps[RTL_IMR_BKDOK] = IMR_BKDOK,
	.maps[RTL_IMR_BEDOK] = IMR_BEDOK,
	.maps[RTL_IMR_VIDOK] = IMR_VIDOK,
	.maps[RTL_IMR_VODOK] = IMR_VODOK,
	.maps[RTL_IMR_ROK] = IMR_ROK,
	.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),

	/* Rate descriptor codes */
	.maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
	.maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
	.maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
	.maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
	.maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
	.maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
	.maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
	.maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
	.maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
	.maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
	.maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
	.maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
	.maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
	.maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
};
239
#define USB_VENDER_ID_REALTEK		0x0bda

/* 2010-10-19 DID_USB_V3.4 */
/* USB match table. NOTE(review): several IDs appear twice (0x8177 and
 * 0x817b in both the 8188CU and 8192CU sections, and 0x07b8:0x8178 on
 * consecutive lines) -- only the first match is used; presumably
 * copy/paste from the vendor list, verify before pruning. */
static struct usb_device_id rtl8192c_usb_ids[] = {

	/*=== Realtek demoboard ===*/
	/* Default ID */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},

	/****** 8188CU ********/
	/* 8188CE-VAU USB minCard */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
	/* 8188cu 1*1 dongle */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
	/* 8188cu 1*1 dongle, (b/g mode only) */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
	/* 8188cu Slim Solo */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
	/* 8188cu Slim Combo */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
	/* 8188RU High-power USB Dongle */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
	/* 8188CE-VAU USB minCard (b/g mode only) */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
	/* 8188 Combo for BC4 */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},

	/****** 8192CU ********/
	/* 8191cu 1*2 */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
	/* 8192cu 2*2 */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
	/* 8192CE-VAU USB minCard */
	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},

	/*=== Customer ID ===*/
	/****** 8188CU ********/
	{RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
	{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
	{RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
	{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
	/* HP - Lite-On ,8188CUS Slim Combo */
	{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
	{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
	{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
	{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
	{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
	{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
	/* NOTE(review): in the next two entries 0x13d3 sits in the product
	 * position, but 0x13d3 is the AzureWave VENDOR id -- the arguments
	 * look transposed; TODO confirm against the vendor DID list. */
	{RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
	/* Russian customer -Azwave (8188CE-VAU b/g mode only) */
	{RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)},

	/****** 8192CU ********/
	{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
	{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
	{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
	{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
	{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
	{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
	{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
	{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
	{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
	{}
};
306
307MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
308
/* USB driver descriptor; probe/disconnect are the generic rtlwifi USB
 * entry points, parameterised by the id_table's per-device hal_cfg.
 * PM callbacks are stubbed out (suspend/resume not yet supported). */
static struct usb_driver rtl8192cu_driver = {
	.name = "rtl8192cu",
	.probe = rtl_usb_probe,
	.disconnect = rtl_usb_disconnect,
	.id_table = rtl8192c_usb_ids,

#ifdef CONFIG_PM
	/* .suspend = rtl_usb_suspend, */
	/* .resume = rtl_usb_resume, */
	/* .reset_resume = rtl8192c_resume, */
#endif /* CONFIG_PM */
#ifdef CONFIG_AUTOSUSPEND
	.supports_autosuspend = 1,
#endif
};
324
/* Module load: register the USB driver with the USB core. */
static int __init rtl8192cu_init(void)
{
	return usb_register(&rtl8192cu_driver);
}

/* Module unload: unregister the USB driver. */
static void __exit rtl8192cu_exit(void)
{
	usb_deregister(&rtl8192cu_driver);
}

module_init(rtl8192cu_init);
module_exit(rtl8192cu_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
new file mode 100644
index 000000000000..43b1177924ab
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -0,0 +1,53 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
#ifndef __RTL92CU_SW_H__
#define __RTL92CU_SW_H__

/* Number of efuse sections on the 8192CU. */
#define EFUSE_MAX_SECTION	16

/* Prototypes for the phy/rf entry points referenced by sw.c's
 * rtl8192cu_hal_ops table. */
void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
					u8 *powerlevel);
void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
					 u8 *ppowerlevel, u8 channel);
bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
					    u8 configtype);
bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
					      u8 configtype);
void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
			    enum radio_path rfpath,
			    u32 regaddr, u32 bitmask, u32 data);
bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
				    enum rf_pwrstate rfpwr_state);
u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
			     enum radio_path rfpath, u32 regaddr, u32 bitmask);
void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
new file mode 100644
index 000000000000..d57ef5e88a9e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
@@ -0,0 +1,1888 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "table.h"
31
/* 2T (two-transmit-chain) PHY register initialisation table: flat list of
 * (register address, 32-bit value) pairs consumed by the BB config code.
 * Vendor-supplied data -- do not edit by hand. */
u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
	0x024, 0x0011800f,
	0x028, 0x00ffdb83,
	0x800, 0x80040002,
	0x804, 0x00000003,
	0x808, 0x0000fc00,
	0x80c, 0x0000000a,
	0x810, 0x10005388,
	0x814, 0x020c3d10,
	0x818, 0x02200385,
	0x81c, 0x00000000,
	0x820, 0x01000100,
	0x824, 0x00390004,
	0x828, 0x01000100,
	0x82c, 0x00390004,
	0x830, 0x27272727,
	0x834, 0x27272727,
	0x838, 0x27272727,
	0x83c, 0x27272727,
	0x840, 0x00010000,
	0x844, 0x00010000,
	0x848, 0x27272727,
	0x84c, 0x27272727,
	0x850, 0x00000000,
	0x854, 0x00000000,
	0x858, 0x569a569a,
	0x85c, 0x0c1b25a4,
	0x860, 0x66e60230,
	0x864, 0x061f0130,
	0x868, 0x27272727,
	0x86c, 0x2b2b2b27,
	0x870, 0x07000700,
	0x874, 0x22184000,
	0x878, 0x08080808,
	0x87c, 0x00000000,
	0x880, 0xc0083070,
	0x884, 0x000004d5,
	0x888, 0x00000000,
	0x88c, 0xcc0000c0,
	0x890, 0x00000800,
	0x894, 0xfffffffe,
	0x898, 0x40302010,
	0x89c, 0x00706050,
	0x900, 0x00000000,
	0x904, 0x00000023,
	0x908, 0x00000000,
	0x90c, 0x81121313,
	0xa00, 0x00d047c8,
	0xa04, 0x80ff000c,
	0xa08, 0x8c838300,
	0xa0c, 0x2e68120f,
	0xa10, 0x9500bb78,
	0xa14, 0x11144028,
	0xa18, 0x00881117,
	0xa1c, 0x89140f00,
	0xa20, 0x1a1b0000,
	0xa24, 0x090e1317,
	0xa28, 0x00000204,
	0xa2c, 0x00d30000,
	0xa70, 0x101fbf00,
	0xa74, 0x00000007,
	0xc00, 0x48071d40,
	0xc04, 0x03a05633,
	0xc08, 0x000000e4,
	0xc0c, 0x6c6c6c6c,
	0xc10, 0x08800000,
	0xc14, 0x40000100,
	0xc18, 0x08800000,
	0xc1c, 0x40000100,
	0xc20, 0x00000000,
	0xc24, 0x00000000,
	0xc28, 0x00000000,
	0xc2c, 0x00000000,
	0xc30, 0x69e9ac44,
	0xc34, 0x469652cf,
	0xc38, 0x49795994,
	0xc3c, 0x0a97971c,
	0xc40, 0x1f7c403f,
	0xc44, 0x000100b7,
	0xc48, 0xec020107,
	0xc4c, 0x007f037f,
	0xc50, 0x6954341e,
	0xc54, 0x43bc0094,
	0xc58, 0x6954341e,
	0xc5c, 0x433c0094,
	0xc60, 0x00000000,
	0xc64, 0x5116848b,
	0xc68, 0x47c00bff,
	0xc6c, 0x00000036,
	0xc70, 0x2c7f000d,
	0xc74, 0x0186115b,
	0xc78, 0x0000001f,
	0xc7c, 0x00b99612,
	0xc80, 0x40000100,
	0xc84, 0x20f60000,
	0xc88, 0x40000100,
	0xc8c, 0x20200000,
	0xc90, 0x00121820,
	0xc94, 0x00000000,
	0xc98, 0x00121820,
	0xc9c, 0x00007f7f,
	0xca0, 0x00000000,
	0xca4, 0x00000080,
	0xca8, 0x00000000,
	0xcac, 0x00000000,
	0xcb0, 0x00000000,
	0xcb4, 0x00000000,
	0xcb8, 0x00000000,
	0xcbc, 0x28000000,
	0xcc0, 0x00000000,
	0xcc4, 0x00000000,
	0xcc8, 0x00000000,
	0xccc, 0x00000000,
	0xcd0, 0x00000000,
	0xcd4, 0x00000000,
	0xcd8, 0x64b22427,
	0xcdc, 0x00766932,
	0xce0, 0x00222222,
	0xce4, 0x00000000,
	0xce8, 0x37644302,
	0xcec, 0x2f97d40c,
	0xd00, 0x00080740,
	0xd04, 0x00020403,
	0xd08, 0x0000907f,
	0xd0c, 0x20010201,
	0xd10, 0xa0633333,
	0xd14, 0x3333bc43,
	0xd18, 0x7a8f5b6b,
	0xd2c, 0xcc979975,
	0xd30, 0x00000000,
	0xd34, 0x80608000,
	0xd38, 0x00000000,
	0xd3c, 0x00027293,
	0xd40, 0x00000000,
	0xd44, 0x00000000,
	0xd48, 0x00000000,
	0xd4c, 0x00000000,
	0xd50, 0x6437140a,
	0xd54, 0x00000000,
	0xd58, 0x00000000,
	0xd5c, 0x30032064,
	0xd60, 0x4653de68,
	0xd64, 0x04518a3c,
	0xd68, 0x00002101,
	0xd6c, 0x2a201c16,
	0xd70, 0x1812362e,
	0xd74, 0x322c2220,
	0xd78, 0x000e3c24,
	0xe00, 0x2a2a2a2a,
	0xe04, 0x2a2a2a2a,
	0xe08, 0x03902a2a,
	0xe10, 0x2a2a2a2a,
	0xe14, 0x2a2a2a2a,
	0xe18, 0x2a2a2a2a,
	0xe1c, 0x2a2a2a2a,
	0xe28, 0x00000000,
	0xe30, 0x1000dc1f,
	0xe34, 0x10008c1f,
	0xe38, 0x02140102,
	0xe3c, 0x681604c2,
	0xe40, 0x01007c00,
	0xe44, 0x01004800,
	0xe48, 0xfb000000,
	0xe4c, 0x000028d1,
	0xe50, 0x1000dc1f,
	0xe54, 0x10008c1f,
	0xe58, 0x02140102,
	0xe5c, 0x28160d05,
	0xe60, 0x00000010,
	0xe68, 0x001b25a4,
	0xe6c, 0x63db25a4,
	0xe70, 0x63db25a4,
	0xe74, 0x0c1b25a4,
	0xe78, 0x0c1b25a4,
	0xe7c, 0x0c1b25a4,
	0xe80, 0x0c1b25a4,
	0xe84, 0x63db25a4,
	0xe88, 0x0c1b25a4,
	0xe8c, 0x63db25a4,
	0xed0, 0x63db25a4,
	0xed4, 0x63db25a4,
	0xed8, 0x63db25a4,
	0xedc, 0x001b25a4,
	0xee0, 0x001b25a4,
	0xeec, 0x6fdb25a4,
	0xf14, 0x00000003,
	0xf4c, 0x00000000,
	0xf00, 0x00000300,
};
221
222u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
223 0x024, 0x0011800f,
224 0x028, 0x00ffdb83,
225 0x800, 0x80040000,
226 0x804, 0x00000001,
227 0x808, 0x0000fc00,
228 0x80c, 0x0000000a,
229 0x810, 0x10005388,
230 0x814, 0x020c3d10,
231 0x818, 0x02200385,
232 0x81c, 0x00000000,
233 0x820, 0x01000100,
234 0x824, 0x00390004,
235 0x828, 0x00000000,
236 0x82c, 0x00000000,
237 0x830, 0x00000000,
238 0x834, 0x00000000,
239 0x838, 0x00000000,
240 0x83c, 0x00000000,
241 0x840, 0x00010000,
242 0x844, 0x00000000,
243 0x848, 0x00000000,
244 0x84c, 0x00000000,
245 0x850, 0x00000000,
246 0x854, 0x00000000,
247 0x858, 0x569a569a,
248 0x85c, 0x001b25a4,
249 0x860, 0x66e60230,
250 0x864, 0x061f0130,
251 0x868, 0x00000000,
252 0x86c, 0x32323200,
253 0x870, 0x07000700,
254 0x874, 0x22004000,
255 0x878, 0x00000808,
256 0x87c, 0x00000000,
257 0x880, 0xc0083070,
258 0x884, 0x000004d5,
259 0x888, 0x00000000,
260 0x88c, 0xccc000c0,
261 0x890, 0x00000800,
262 0x894, 0xfffffffe,
263 0x898, 0x40302010,
264 0x89c, 0x00706050,
265 0x900, 0x00000000,
266 0x904, 0x00000023,
267 0x908, 0x00000000,
268 0x90c, 0x81121111,
269 0xa00, 0x00d047c8,
270 0xa04, 0x80ff000c,
271 0xa08, 0x8c838300,
272 0xa0c, 0x2e68120f,
273 0xa10, 0x9500bb78,
274 0xa14, 0x11144028,
275 0xa18, 0x00881117,
276 0xa1c, 0x89140f00,
277 0xa20, 0x1a1b0000,
278 0xa24, 0x090e1317,
279 0xa28, 0x00000204,
280 0xa2c, 0x00d30000,
281 0xa70, 0x101fbf00,
282 0xa74, 0x00000007,
283 0xc00, 0x48071d40,
284 0xc04, 0x03a05611,
285 0xc08, 0x000000e4,
286 0xc0c, 0x6c6c6c6c,
287 0xc10, 0x08800000,
288 0xc14, 0x40000100,
289 0xc18, 0x08800000,
290 0xc1c, 0x40000100,
291 0xc20, 0x00000000,
292 0xc24, 0x00000000,
293 0xc28, 0x00000000,
294 0xc2c, 0x00000000,
295 0xc30, 0x69e9ac44,
296 0xc34, 0x469652cf,
297 0xc38, 0x49795994,
298 0xc3c, 0x0a97971c,
299 0xc40, 0x1f7c403f,
300 0xc44, 0x000100b7,
301 0xc48, 0xec020107,
302 0xc4c, 0x007f037f,
303 0xc50, 0x6954341e,
304 0xc54, 0x43bc0094,
305 0xc58, 0x6954341e,
306 0xc5c, 0x433c0094,
307 0xc60, 0x00000000,
308 0xc64, 0x5116848b,
309 0xc68, 0x47c00bff,
310 0xc6c, 0x00000036,
311 0xc70, 0x2c7f000d,
312 0xc74, 0x018610db,
313 0xc78, 0x0000001f,
314 0xc7c, 0x00b91612,
315 0xc80, 0x40000100,
316 0xc84, 0x20f60000,
317 0xc88, 0x40000100,
318 0xc8c, 0x20200000,
319 0xc90, 0x00121820,
320 0xc94, 0x00000000,
321 0xc98, 0x00121820,
322 0xc9c, 0x00007f7f,
323 0xca0, 0x00000000,
324 0xca4, 0x00000080,
325 0xca8, 0x00000000,
326 0xcac, 0x00000000,
327 0xcb0, 0x00000000,
328 0xcb4, 0x00000000,
329 0xcb8, 0x00000000,
330 0xcbc, 0x28000000,
331 0xcc0, 0x00000000,
332 0xcc4, 0x00000000,
333 0xcc8, 0x00000000,
334 0xccc, 0x00000000,
335 0xcd0, 0x00000000,
336 0xcd4, 0x00000000,
337 0xcd8, 0x64b22427,
338 0xcdc, 0x00766932,
339 0xce0, 0x00222222,
340 0xce4, 0x00000000,
341 0xce8, 0x37644302,
342 0xcec, 0x2f97d40c,
343 0xd00, 0x00080740,
344 0xd04, 0x00020401,
345 0xd08, 0x0000907f,
346 0xd0c, 0x20010201,
347 0xd10, 0xa0633333,
348 0xd14, 0x3333bc43,
349 0xd18, 0x7a8f5b6b,
350 0xd2c, 0xcc979975,
351 0xd30, 0x00000000,
352 0xd34, 0x80608000,
353 0xd38, 0x00000000,
354 0xd3c, 0x00027293,
355 0xd40, 0x00000000,
356 0xd44, 0x00000000,
357 0xd48, 0x00000000,
358 0xd4c, 0x00000000,
359 0xd50, 0x6437140a,
360 0xd54, 0x00000000,
361 0xd58, 0x00000000,
362 0xd5c, 0x30032064,
363 0xd60, 0x4653de68,
364 0xd64, 0x04518a3c,
365 0xd68, 0x00002101,
366 0xd6c, 0x2a201c16,
367 0xd70, 0x1812362e,
368 0xd74, 0x322c2220,
369 0xd78, 0x000e3c24,
370 0xe00, 0x2a2a2a2a,
371 0xe04, 0x2a2a2a2a,
372 0xe08, 0x03902a2a,
373 0xe10, 0x2a2a2a2a,
374 0xe14, 0x2a2a2a2a,
375 0xe18, 0x2a2a2a2a,
376 0xe1c, 0x2a2a2a2a,
377 0xe28, 0x00000000,
378 0xe30, 0x1000dc1f,
379 0xe34, 0x10008c1f,
380 0xe38, 0x02140102,
381 0xe3c, 0x681604c2,
382 0xe40, 0x01007c00,
383 0xe44, 0x01004800,
384 0xe48, 0xfb000000,
385 0xe4c, 0x000028d1,
386 0xe50, 0x1000dc1f,
387 0xe54, 0x10008c1f,
388 0xe58, 0x02140102,
389 0xe5c, 0x28160d05,
390 0xe60, 0x00000008,
391 0xe68, 0x001b25a4,
392 0xe6c, 0x631b25a0,
393 0xe70, 0x631b25a0,
394 0xe74, 0x081b25a0,
395 0xe78, 0x081b25a0,
396 0xe7c, 0x081b25a0,
397 0xe80, 0x081b25a0,
398 0xe84, 0x631b25a0,
399 0xe88, 0x081b25a0,
400 0xe8c, 0x631b25a0,
401 0xed0, 0x631b25a0,
402 0xed4, 0x631b25a0,
403 0xed8, 0x631b25a0,
404 0xedc, 0x001b25a0,
405 0xee0, 0x001b25a0,
406 0xeec, 0x6b1b25a0,
407 0xf14, 0x00000003,
408 0xf4c, 0x00000000,
409 0xf00, 0x00000300,
410};
411
412u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH] = {
413 0xe00, 0xffffffff, 0x07090c0c,
414 0xe04, 0xffffffff, 0x01020405,
415 0xe08, 0x0000ff00, 0x00000000,
416 0x86c, 0xffffff00, 0x00000000,
417 0xe10, 0xffffffff, 0x0b0c0c0e,
418 0xe14, 0xffffffff, 0x01030506,
419 0xe18, 0xffffffff, 0x0b0c0d0e,
420 0xe1c, 0xffffffff, 0x01030509,
421 0x830, 0xffffffff, 0x07090c0c,
422 0x834, 0xffffffff, 0x01020405,
423 0x838, 0xffffff00, 0x00000000,
424 0x86c, 0x000000ff, 0x00000000,
425 0x83c, 0xffffffff, 0x0b0c0d0e,
426 0x848, 0xffffffff, 0x01030509,
427 0x84c, 0xffffffff, 0x0b0c0d0e,
428 0x868, 0xffffffff, 0x01030509,
429 0xe00, 0xffffffff, 0x00000000,
430 0xe04, 0xffffffff, 0x00000000,
431 0xe08, 0x0000ff00, 0x00000000,
432 0x86c, 0xffffff00, 0x00000000,
433 0xe10, 0xffffffff, 0x00000000,
434 0xe14, 0xffffffff, 0x00000000,
435 0xe18, 0xffffffff, 0x00000000,
436 0xe1c, 0xffffffff, 0x00000000,
437 0x830, 0xffffffff, 0x00000000,
438 0x834, 0xffffffff, 0x00000000,
439 0x838, 0xffffff00, 0x00000000,
440 0x86c, 0x000000ff, 0x00000000,
441 0x83c, 0xffffffff, 0x00000000,
442 0x848, 0xffffffff, 0x00000000,
443 0x84c, 0xffffffff, 0x00000000,
444 0x868, 0xffffffff, 0x00000000,
445 0xe00, 0xffffffff, 0x04040404,
446 0xe04, 0xffffffff, 0x00020204,
447 0xe08, 0x0000ff00, 0x00000000,
448 0x86c, 0xffffff00, 0x00000000,
449 0xe10, 0xffffffff, 0x06060606,
450 0xe14, 0xffffffff, 0x00020406,
451 0xe18, 0xffffffff, 0x00000000,
452 0xe1c, 0xffffffff, 0x00000000,
453 0x830, 0xffffffff, 0x04040404,
454 0x834, 0xffffffff, 0x00020204,
455 0x838, 0xffffff00, 0x00000000,
456 0x86c, 0x000000ff, 0x00000000,
457 0x83c, 0xffffffff, 0x06060606,
458 0x848, 0xffffffff, 0x00020406,
459 0x84c, 0xffffffff, 0x00000000,
460 0x868, 0xffffffff, 0x00000000,
461 0xe00, 0xffffffff, 0x00000000,
462 0xe04, 0xffffffff, 0x00000000,
463 0xe08, 0x0000ff00, 0x00000000,
464 0x86c, 0xffffff00, 0x00000000,
465 0xe10, 0xffffffff, 0x00000000,
466 0xe14, 0xffffffff, 0x00000000,
467 0xe18, 0xffffffff, 0x00000000,
468 0xe1c, 0xffffffff, 0x00000000,
469 0x830, 0xffffffff, 0x00000000,
470 0x834, 0xffffffff, 0x00000000,
471 0x838, 0xffffff00, 0x00000000,
472 0x86c, 0x000000ff, 0x00000000,
473 0x83c, 0xffffffff, 0x00000000,
474 0x848, 0xffffffff, 0x00000000,
475 0x84c, 0xffffffff, 0x00000000,
476 0x868, 0xffffffff, 0x00000000,
477 0xe00, 0xffffffff, 0x00000000,
478 0xe04, 0xffffffff, 0x00000000,
479 0xe08, 0x0000ff00, 0x00000000,
480 0x86c, 0xffffff00, 0x00000000,
481 0xe10, 0xffffffff, 0x00000000,
482 0xe14, 0xffffffff, 0x00000000,
483 0xe18, 0xffffffff, 0x00000000,
484 0xe1c, 0xffffffff, 0x00000000,
485 0x830, 0xffffffff, 0x00000000,
486 0x834, 0xffffffff, 0x00000000,
487 0x838, 0xffffff00, 0x00000000,
488 0x86c, 0x000000ff, 0x00000000,
489 0x83c, 0xffffffff, 0x00000000,
490 0x848, 0xffffffff, 0x00000000,
491 0x84c, 0xffffffff, 0x00000000,
492 0x868, 0xffffffff, 0x00000000,
493 0xe00, 0xffffffff, 0x04040404,
494 0xe04, 0xffffffff, 0x00020204,
495 0xe08, 0x0000ff00, 0x00000000,
496 0x86c, 0xffffff00, 0x00000000,
497 0xe10, 0xffffffff, 0x00000000,
498 0xe14, 0xffffffff, 0x00000000,
499 0xe18, 0xffffffff, 0x00000000,
500 0xe1c, 0xffffffff, 0x00000000,
501 0x830, 0xffffffff, 0x04040404,
502 0x834, 0xffffffff, 0x00020204,
503 0x838, 0xffffff00, 0x00000000,
504 0x86c, 0x000000ff, 0x00000000,
505 0x83c, 0xffffffff, 0x00000000,
506 0x848, 0xffffffff, 0x00000000,
507 0x84c, 0xffffffff, 0x00000000,
508 0x868, 0xffffffff, 0x00000000,
509 0xe00, 0xffffffff, 0x00000000,
510 0xe04, 0xffffffff, 0x00000000,
511 0xe08, 0x0000ff00, 0x00000000,
512 0x86c, 0xffffff00, 0x00000000,
513 0xe10, 0xffffffff, 0x00000000,
514 0xe14, 0xffffffff, 0x00000000,
515 0xe18, 0xffffffff, 0x00000000,
516 0xe1c, 0xffffffff, 0x00000000,
517 0x830, 0xffffffff, 0x00000000,
518 0x834, 0xffffffff, 0x00000000,
519 0x838, 0xffffff00, 0x00000000,
520 0x86c, 0x000000ff, 0x00000000,
521 0x83c, 0xffffffff, 0x00000000,
522 0x848, 0xffffffff, 0x00000000,
523 0x84c, 0xffffffff, 0x00000000,
524 0x868, 0xffffffff, 0x00000000,
525};
526
527u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
528 0x000, 0x00030159,
529 0x001, 0x00031284,
530 0x002, 0x00098000,
531 0x003, 0x00018c63,
532 0x004, 0x000210e7,
533 0x009, 0x0002044f,
534 0x00a, 0x0001adb1,
535 0x00b, 0x00054867,
536 0x00c, 0x0008992e,
537 0x00d, 0x0000e52c,
538 0x00e, 0x00039ce7,
539 0x00f, 0x00000451,
540 0x019, 0x00000000,
541 0x01a, 0x00010255,
542 0x01b, 0x00060a00,
543 0x01c, 0x000fc378,
544 0x01d, 0x000a1250,
545 0x01e, 0x0004445f,
546 0x01f, 0x00080001,
547 0x020, 0x0000b614,
548 0x021, 0x0006c000,
549 0x022, 0x00000000,
550 0x023, 0x00001558,
551 0x024, 0x00000060,
552 0x025, 0x00000483,
553 0x026, 0x0004f000,
554 0x027, 0x000ec7d9,
555 0x028, 0x000577c0,
556 0x029, 0x00004783,
557 0x02a, 0x00000001,
558 0x02b, 0x00021334,
559 0x02a, 0x00000000,
560 0x02b, 0x00000054,
561 0x02a, 0x00000001,
562 0x02b, 0x00000808,
563 0x02b, 0x00053333,
564 0x02c, 0x0000000c,
565 0x02a, 0x00000002,
566 0x02b, 0x00000808,
567 0x02b, 0x0005b333,
568 0x02c, 0x0000000d,
569 0x02a, 0x00000003,
570 0x02b, 0x00000808,
571 0x02b, 0x00063333,
572 0x02c, 0x0000000d,
573 0x02a, 0x00000004,
574 0x02b, 0x00000808,
575 0x02b, 0x0006b333,
576 0x02c, 0x0000000d,
577 0x02a, 0x00000005,
578 0x02b, 0x00000808,
579 0x02b, 0x00073333,
580 0x02c, 0x0000000d,
581 0x02a, 0x00000006,
582 0x02b, 0x00000709,
583 0x02b, 0x0005b333,
584 0x02c, 0x0000000d,
585 0x02a, 0x00000007,
586 0x02b, 0x00000709,
587 0x02b, 0x00063333,
588 0x02c, 0x0000000d,
589 0x02a, 0x00000008,
590 0x02b, 0x0000060a,
591 0x02b, 0x0004b333,
592 0x02c, 0x0000000d,
593 0x02a, 0x00000009,
594 0x02b, 0x0000060a,
595 0x02b, 0x00053333,
596 0x02c, 0x0000000d,
597 0x02a, 0x0000000a,
598 0x02b, 0x0000060a,
599 0x02b, 0x0005b333,
600 0x02c, 0x0000000d,
601 0x02a, 0x0000000b,
602 0x02b, 0x0000060a,
603 0x02b, 0x00063333,
604 0x02c, 0x0000000d,
605 0x02a, 0x0000000c,
606 0x02b, 0x0000060a,
607 0x02b, 0x0006b333,
608 0x02c, 0x0000000d,
609 0x02a, 0x0000000d,
610 0x02b, 0x0000060a,
611 0x02b, 0x00073333,
612 0x02c, 0x0000000d,
613 0x02a, 0x0000000e,
614 0x02b, 0x0000050b,
615 0x02b, 0x00066666,
616 0x02c, 0x0000001a,
617 0x02a, 0x000e0000,
618 0x010, 0x0004000f,
619 0x011, 0x000e31fc,
620 0x010, 0x0006000f,
621 0x011, 0x000ff9f8,
622 0x010, 0x0002000f,
623 0x011, 0x000203f9,
624 0x010, 0x0003000f,
625 0x011, 0x000ff500,
626 0x010, 0x00000000,
627 0x011, 0x00000000,
628 0x010, 0x0008000f,
629 0x011, 0x0003f100,
630 0x010, 0x0009000f,
631 0x011, 0x00023100,
632 0x012, 0x00032000,
633 0x012, 0x00071000,
634 0x012, 0x000b0000,
635 0x012, 0x000fc000,
636 0x013, 0x000287af,
637 0x013, 0x000244b7,
638 0x013, 0x000204ab,
639 0x013, 0x0001c49f,
640 0x013, 0x00018493,
641 0x013, 0x00014297,
642 0x013, 0x00010295,
643 0x013, 0x0000c298,
644 0x013, 0x0000819c,
645 0x013, 0x000040a8,
646 0x013, 0x0000001c,
647 0x014, 0x0001944c,
648 0x014, 0x00059444,
649 0x014, 0x0009944c,
650 0x014, 0x000d9444,
651 0x015, 0x0000f424,
652 0x015, 0x0004f424,
653 0x015, 0x0008f424,
654 0x015, 0x000cf424,
655 0x016, 0x000e0330,
656 0x016, 0x000a0330,
657 0x016, 0x00060330,
658 0x016, 0x00020330,
659 0x000, 0x00010159,
660 0x018, 0x0000f401,
661 0x0fe, 0x00000000,
662 0x0fe, 0x00000000,
663 0x01f, 0x00080003,
664 0x0fe, 0x00000000,
665 0x0fe, 0x00000000,
666 0x01e, 0x00044457,
667 0x01f, 0x00080000,
668 0x000, 0x00030159,
669};
670
671u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH] = {
672 0x000, 0x00030159,
673 0x001, 0x00031284,
674 0x002, 0x00098000,
675 0x003, 0x00018c63,
676 0x004, 0x000210e7,
677 0x009, 0x0002044f,
678 0x00a, 0x0001adb1,
679 0x00b, 0x00054867,
680 0x00c, 0x0008992e,
681 0x00d, 0x0000e52c,
682 0x00e, 0x00039ce7,
683 0x00f, 0x00000451,
684 0x012, 0x00032000,
685 0x012, 0x00071000,
686 0x012, 0x000b0000,
687 0x012, 0x000fc000,
688 0x013, 0x000287af,
689 0x013, 0x000244b7,
690 0x013, 0x000204ab,
691 0x013, 0x0001c49f,
692 0x013, 0x00018493,
693 0x013, 0x00014297,
694 0x013, 0x00010295,
695 0x013, 0x0000c298,
696 0x013, 0x0000819c,
697 0x013, 0x000040a8,
698 0x013, 0x0000001c,
699 0x014, 0x0001944c,
700 0x014, 0x00059444,
701 0x014, 0x0009944c,
702 0x014, 0x000d9444,
703 0x015, 0x0000f424,
704 0x015, 0x0004f424,
705 0x015, 0x0008f424,
706 0x015, 0x000cf424,
707 0x016, 0x000e0330,
708 0x016, 0x000a0330,
709 0x016, 0x00060330,
710 0x016, 0x00020330,
711};
712
713u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH] = {
714 0x000, 0x00030159,
715 0x001, 0x00031284,
716 0x002, 0x00098000,
717 0x003, 0x00018c63,
718 0x004, 0x000210e7,
719 0x009, 0x0002044f,
720 0x00a, 0x0001adb1,
721 0x00b, 0x00054867,
722 0x00c, 0x0008992e,
723 0x00d, 0x0000e52c,
724 0x00e, 0x00039ce7,
725 0x00f, 0x00000451,
726 0x019, 0x00000000,
727 0x01a, 0x00010255,
728 0x01b, 0x00060a00,
729 0x01c, 0x000fc378,
730 0x01d, 0x000a1250,
731 0x01e, 0x0004445f,
732 0x01f, 0x00080001,
733 0x020, 0x0000b614,
734 0x021, 0x0006c000,
735 0x022, 0x00000000,
736 0x023, 0x00001558,
737 0x024, 0x00000060,
738 0x025, 0x00000483,
739 0x026, 0x0004f000,
740 0x027, 0x000ec7d9,
741 0x028, 0x000577c0,
742 0x029, 0x00004783,
743 0x02a, 0x00000001,
744 0x02b, 0x00021334,
745 0x02a, 0x00000000,
746 0x02b, 0x00000054,
747 0x02a, 0x00000001,
748 0x02b, 0x00000808,
749 0x02b, 0x00053333,
750 0x02c, 0x0000000c,
751 0x02a, 0x00000002,
752 0x02b, 0x00000808,
753 0x02b, 0x0005b333,
754 0x02c, 0x0000000d,
755 0x02a, 0x00000003,
756 0x02b, 0x00000808,
757 0x02b, 0x00063333,
758 0x02c, 0x0000000d,
759 0x02a, 0x00000004,
760 0x02b, 0x00000808,
761 0x02b, 0x0006b333,
762 0x02c, 0x0000000d,
763 0x02a, 0x00000005,
764 0x02b, 0x00000808,
765 0x02b, 0x00073333,
766 0x02c, 0x0000000d,
767 0x02a, 0x00000006,
768 0x02b, 0x00000709,
769 0x02b, 0x0005b333,
770 0x02c, 0x0000000d,
771 0x02a, 0x00000007,
772 0x02b, 0x00000709,
773 0x02b, 0x00063333,
774 0x02c, 0x0000000d,
775 0x02a, 0x00000008,
776 0x02b, 0x0000060a,
777 0x02b, 0x0004b333,
778 0x02c, 0x0000000d,
779 0x02a, 0x00000009,
780 0x02b, 0x0000060a,
781 0x02b, 0x00053333,
782 0x02c, 0x0000000d,
783 0x02a, 0x0000000a,
784 0x02b, 0x0000060a,
785 0x02b, 0x0005b333,
786 0x02c, 0x0000000d,
787 0x02a, 0x0000000b,
788 0x02b, 0x0000060a,
789 0x02b, 0x00063333,
790 0x02c, 0x0000000d,
791 0x02a, 0x0000000c,
792 0x02b, 0x0000060a,
793 0x02b, 0x0006b333,
794 0x02c, 0x0000000d,
795 0x02a, 0x0000000d,
796 0x02b, 0x0000060a,
797 0x02b, 0x00073333,
798 0x02c, 0x0000000d,
799 0x02a, 0x0000000e,
800 0x02b, 0x0000050b,
801 0x02b, 0x00066666,
802 0x02c, 0x0000001a,
803 0x02a, 0x000e0000,
804 0x010, 0x0004000f,
805 0x011, 0x000e31fc,
806 0x010, 0x0006000f,
807 0x011, 0x000ff9f8,
808 0x010, 0x0002000f,
809 0x011, 0x000203f9,
810 0x010, 0x0003000f,
811 0x011, 0x000ff500,
812 0x010, 0x00000000,
813 0x011, 0x00000000,
814 0x010, 0x0008000f,
815 0x011, 0x0003f100,
816 0x010, 0x0009000f,
817 0x011, 0x00023100,
818 0x012, 0x00032000,
819 0x012, 0x00071000,
820 0x012, 0x000b0000,
821 0x012, 0x000fc000,
822 0x013, 0x000287b3,
823 0x013, 0x000244b7,
824 0x013, 0x000204ab,
825 0x013, 0x0001c49f,
826 0x013, 0x00018493,
827 0x013, 0x0001429b,
828 0x013, 0x00010299,
829 0x013, 0x0000c29c,
830 0x013, 0x000081a0,
831 0x013, 0x000040ac,
832 0x013, 0x00000020,
833 0x014, 0x0001944c,
834 0x014, 0x00059444,
835 0x014, 0x0009944c,
836 0x014, 0x000d9444,
837 0x015, 0x0000f405,
838 0x015, 0x0004f405,
839 0x015, 0x0008f405,
840 0x015, 0x000cf405,
841 0x016, 0x000e0330,
842 0x016, 0x000a0330,
843 0x016, 0x00060330,
844 0x016, 0x00020330,
845 0x000, 0x00010159,
846 0x018, 0x0000f401,
847 0x0fe, 0x00000000,
848 0x0fe, 0x00000000,
849 0x01f, 0x00080003,
850 0x0fe, 0x00000000,
851 0x0fe, 0x00000000,
852 0x01e, 0x00044457,
853 0x01f, 0x00080000,
854 0x000, 0x00030159,
855};
856
857u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH] = {
858 0x0,
859};
860
861u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
862 0x420, 0x00000080,
863 0x423, 0x00000000,
864 0x430, 0x00000000,
865 0x431, 0x00000000,
866 0x432, 0x00000000,
867 0x433, 0x00000001,
868 0x434, 0x00000004,
869 0x435, 0x00000005,
870 0x436, 0x00000006,
871 0x437, 0x00000007,
872 0x438, 0x00000000,
873 0x439, 0x00000000,
874 0x43a, 0x00000000,
875 0x43b, 0x00000001,
876 0x43c, 0x00000004,
877 0x43d, 0x00000005,
878 0x43e, 0x00000006,
879 0x43f, 0x00000007,
880 0x440, 0x0000005d,
881 0x441, 0x00000001,
882 0x442, 0x00000000,
883 0x444, 0x00000015,
884 0x445, 0x000000f0,
885 0x446, 0x0000000f,
886 0x447, 0x00000000,
887 0x458, 0x00000041,
888 0x459, 0x000000a8,
889 0x45a, 0x00000072,
890 0x45b, 0x000000b9,
891 0x460, 0x00000066,
892 0x461, 0x00000066,
893 0x462, 0x00000008,
894 0x463, 0x00000003,
895 0x4c8, 0x000000ff,
896 0x4c9, 0x00000008,
897 0x4cc, 0x000000ff,
898 0x4cd, 0x000000ff,
899 0x4ce, 0x00000001,
900 0x500, 0x00000026,
901 0x501, 0x000000a2,
902 0x502, 0x0000002f,
903 0x503, 0x00000000,
904 0x504, 0x00000028,
905 0x505, 0x000000a3,
906 0x506, 0x0000005e,
907 0x507, 0x00000000,
908 0x508, 0x0000002b,
909 0x509, 0x000000a4,
910 0x50a, 0x0000005e,
911 0x50b, 0x00000000,
912 0x50c, 0x0000004f,
913 0x50d, 0x000000a4,
914 0x50e, 0x00000000,
915 0x50f, 0x00000000,
916 0x512, 0x0000001c,
917 0x514, 0x0000000a,
918 0x515, 0x00000010,
919 0x516, 0x0000000a,
920 0x517, 0x00000010,
921 0x51a, 0x00000016,
922 0x524, 0x0000000f,
923 0x525, 0x0000004f,
924 0x546, 0x00000040,
925 0x547, 0x00000000,
926 0x550, 0x00000010,
927 0x551, 0x00000010,
928 0x559, 0x00000002,
929 0x55a, 0x00000002,
930 0x55d, 0x000000ff,
931 0x605, 0x00000030,
932 0x608, 0x0000000e,
933 0x609, 0x0000002a,
934 0x652, 0x00000020,
935 0x63c, 0x0000000a,
936 0x63d, 0x0000000e,
937 0x63e, 0x0000000a,
938 0x63f, 0x0000000e,
939 0x66e, 0x00000005,
940 0x700, 0x00000021,
941 0x701, 0x00000043,
942 0x702, 0x00000065,
943 0x703, 0x00000087,
944 0x708, 0x00000021,
945 0x709, 0x00000043,
946 0x70a, 0x00000065,
947 0x70b, 0x00000087,
948};
949
950u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH] = {
951 0xc78, 0x7b000001,
952 0xc78, 0x7b010001,
953 0xc78, 0x7b020001,
954 0xc78, 0x7b030001,
955 0xc78, 0x7b040001,
956 0xc78, 0x7b050001,
957 0xc78, 0x7a060001,
958 0xc78, 0x79070001,
959 0xc78, 0x78080001,
960 0xc78, 0x77090001,
961 0xc78, 0x760a0001,
962 0xc78, 0x750b0001,
963 0xc78, 0x740c0001,
964 0xc78, 0x730d0001,
965 0xc78, 0x720e0001,
966 0xc78, 0x710f0001,
967 0xc78, 0x70100001,
968 0xc78, 0x6f110001,
969 0xc78, 0x6e120001,
970 0xc78, 0x6d130001,
971 0xc78, 0x6c140001,
972 0xc78, 0x6b150001,
973 0xc78, 0x6a160001,
974 0xc78, 0x69170001,
975 0xc78, 0x68180001,
976 0xc78, 0x67190001,
977 0xc78, 0x661a0001,
978 0xc78, 0x651b0001,
979 0xc78, 0x641c0001,
980 0xc78, 0x631d0001,
981 0xc78, 0x621e0001,
982 0xc78, 0x611f0001,
983 0xc78, 0x60200001,
984 0xc78, 0x49210001,
985 0xc78, 0x48220001,
986 0xc78, 0x47230001,
987 0xc78, 0x46240001,
988 0xc78, 0x45250001,
989 0xc78, 0x44260001,
990 0xc78, 0x43270001,
991 0xc78, 0x42280001,
992 0xc78, 0x41290001,
993 0xc78, 0x402a0001,
994 0xc78, 0x262b0001,
995 0xc78, 0x252c0001,
996 0xc78, 0x242d0001,
997 0xc78, 0x232e0001,
998 0xc78, 0x222f0001,
999 0xc78, 0x21300001,
1000 0xc78, 0x20310001,
1001 0xc78, 0x06320001,
1002 0xc78, 0x05330001,
1003 0xc78, 0x04340001,
1004 0xc78, 0x03350001,
1005 0xc78, 0x02360001,
1006 0xc78, 0x01370001,
1007 0xc78, 0x00380001,
1008 0xc78, 0x00390001,
1009 0xc78, 0x003a0001,
1010 0xc78, 0x003b0001,
1011 0xc78, 0x003c0001,
1012 0xc78, 0x003d0001,
1013 0xc78, 0x003e0001,
1014 0xc78, 0x003f0001,
1015 0xc78, 0x7b400001,
1016 0xc78, 0x7b410001,
1017 0xc78, 0x7b420001,
1018 0xc78, 0x7b430001,
1019 0xc78, 0x7b440001,
1020 0xc78, 0x7b450001,
1021 0xc78, 0x7a460001,
1022 0xc78, 0x79470001,
1023 0xc78, 0x78480001,
1024 0xc78, 0x77490001,
1025 0xc78, 0x764a0001,
1026 0xc78, 0x754b0001,
1027 0xc78, 0x744c0001,
1028 0xc78, 0x734d0001,
1029 0xc78, 0x724e0001,
1030 0xc78, 0x714f0001,
1031 0xc78, 0x70500001,
1032 0xc78, 0x6f510001,
1033 0xc78, 0x6e520001,
1034 0xc78, 0x6d530001,
1035 0xc78, 0x6c540001,
1036 0xc78, 0x6b550001,
1037 0xc78, 0x6a560001,
1038 0xc78, 0x69570001,
1039 0xc78, 0x68580001,
1040 0xc78, 0x67590001,
1041 0xc78, 0x665a0001,
1042 0xc78, 0x655b0001,
1043 0xc78, 0x645c0001,
1044 0xc78, 0x635d0001,
1045 0xc78, 0x625e0001,
1046 0xc78, 0x615f0001,
1047 0xc78, 0x60600001,
1048 0xc78, 0x49610001,
1049 0xc78, 0x48620001,
1050 0xc78, 0x47630001,
1051 0xc78, 0x46640001,
1052 0xc78, 0x45650001,
1053 0xc78, 0x44660001,
1054 0xc78, 0x43670001,
1055 0xc78, 0x42680001,
1056 0xc78, 0x41690001,
1057 0xc78, 0x406a0001,
1058 0xc78, 0x266b0001,
1059 0xc78, 0x256c0001,
1060 0xc78, 0x246d0001,
1061 0xc78, 0x236e0001,
1062 0xc78, 0x226f0001,
1063 0xc78, 0x21700001,
1064 0xc78, 0x20710001,
1065 0xc78, 0x06720001,
1066 0xc78, 0x05730001,
1067 0xc78, 0x04740001,
1068 0xc78, 0x03750001,
1069 0xc78, 0x02760001,
1070 0xc78, 0x01770001,
1071 0xc78, 0x00780001,
1072 0xc78, 0x00790001,
1073 0xc78, 0x007a0001,
1074 0xc78, 0x007b0001,
1075 0xc78, 0x007c0001,
1076 0xc78, 0x007d0001,
1077 0xc78, 0x007e0001,
1078 0xc78, 0x007f0001,
1079 0xc78, 0x3800001e,
1080 0xc78, 0x3801001e,
1081 0xc78, 0x3802001e,
1082 0xc78, 0x3803001e,
1083 0xc78, 0x3804001e,
1084 0xc78, 0x3805001e,
1085 0xc78, 0x3806001e,
1086 0xc78, 0x3807001e,
1087 0xc78, 0x3808001e,
1088 0xc78, 0x3c09001e,
1089 0xc78, 0x3e0a001e,
1090 0xc78, 0x400b001e,
1091 0xc78, 0x440c001e,
1092 0xc78, 0x480d001e,
1093 0xc78, 0x4c0e001e,
1094 0xc78, 0x500f001e,
1095 0xc78, 0x5210001e,
1096 0xc78, 0x5611001e,
1097 0xc78, 0x5a12001e,
1098 0xc78, 0x5e13001e,
1099 0xc78, 0x6014001e,
1100 0xc78, 0x6015001e,
1101 0xc78, 0x6016001e,
1102 0xc78, 0x6217001e,
1103 0xc78, 0x6218001e,
1104 0xc78, 0x6219001e,
1105 0xc78, 0x621a001e,
1106 0xc78, 0x621b001e,
1107 0xc78, 0x621c001e,
1108 0xc78, 0x621d001e,
1109 0xc78, 0x621e001e,
1110 0xc78, 0x621f001e,
1111};
1112
1113u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = {
1114 0xc78, 0x7b000001,
1115 0xc78, 0x7b010001,
1116 0xc78, 0x7b020001,
1117 0xc78, 0x7b030001,
1118 0xc78, 0x7b040001,
1119 0xc78, 0x7b050001,
1120 0xc78, 0x7a060001,
1121 0xc78, 0x79070001,
1122 0xc78, 0x78080001,
1123 0xc78, 0x77090001,
1124 0xc78, 0x760a0001,
1125 0xc78, 0x750b0001,
1126 0xc78, 0x740c0001,
1127 0xc78, 0x730d0001,
1128 0xc78, 0x720e0001,
1129 0xc78, 0x710f0001,
1130 0xc78, 0x70100001,
1131 0xc78, 0x6f110001,
1132 0xc78, 0x6e120001,
1133 0xc78, 0x6d130001,
1134 0xc78, 0x6c140001,
1135 0xc78, 0x6b150001,
1136 0xc78, 0x6a160001,
1137 0xc78, 0x69170001,
1138 0xc78, 0x68180001,
1139 0xc78, 0x67190001,
1140 0xc78, 0x661a0001,
1141 0xc78, 0x651b0001,
1142 0xc78, 0x641c0001,
1143 0xc78, 0x631d0001,
1144 0xc78, 0x621e0001,
1145 0xc78, 0x611f0001,
1146 0xc78, 0x60200001,
1147 0xc78, 0x49210001,
1148 0xc78, 0x48220001,
1149 0xc78, 0x47230001,
1150 0xc78, 0x46240001,
1151 0xc78, 0x45250001,
1152 0xc78, 0x44260001,
1153 0xc78, 0x43270001,
1154 0xc78, 0x42280001,
1155 0xc78, 0x41290001,
1156 0xc78, 0x402a0001,
1157 0xc78, 0x262b0001,
1158 0xc78, 0x252c0001,
1159 0xc78, 0x242d0001,
1160 0xc78, 0x232e0001,
1161 0xc78, 0x222f0001,
1162 0xc78, 0x21300001,
1163 0xc78, 0x20310001,
1164 0xc78, 0x06320001,
1165 0xc78, 0x05330001,
1166 0xc78, 0x04340001,
1167 0xc78, 0x03350001,
1168 0xc78, 0x02360001,
1169 0xc78, 0x01370001,
1170 0xc78, 0x00380001,
1171 0xc78, 0x00390001,
1172 0xc78, 0x003a0001,
1173 0xc78, 0x003b0001,
1174 0xc78, 0x003c0001,
1175 0xc78, 0x003d0001,
1176 0xc78, 0x003e0001,
1177 0xc78, 0x003f0001,
1178 0xc78, 0x7b400001,
1179 0xc78, 0x7b410001,
1180 0xc78, 0x7b420001,
1181 0xc78, 0x7b430001,
1182 0xc78, 0x7b440001,
1183 0xc78, 0x7b450001,
1184 0xc78, 0x7a460001,
1185 0xc78, 0x79470001,
1186 0xc78, 0x78480001,
1187 0xc78, 0x77490001,
1188 0xc78, 0x764a0001,
1189 0xc78, 0x754b0001,
1190 0xc78, 0x744c0001,
1191 0xc78, 0x734d0001,
1192 0xc78, 0x724e0001,
1193 0xc78, 0x714f0001,
1194 0xc78, 0x70500001,
1195 0xc78, 0x6f510001,
1196 0xc78, 0x6e520001,
1197 0xc78, 0x6d530001,
1198 0xc78, 0x6c540001,
1199 0xc78, 0x6b550001,
1200 0xc78, 0x6a560001,
1201 0xc78, 0x69570001,
1202 0xc78, 0x68580001,
1203 0xc78, 0x67590001,
1204 0xc78, 0x665a0001,
1205 0xc78, 0x655b0001,
1206 0xc78, 0x645c0001,
1207 0xc78, 0x635d0001,
1208 0xc78, 0x625e0001,
1209 0xc78, 0x615f0001,
1210 0xc78, 0x60600001,
1211 0xc78, 0x49610001,
1212 0xc78, 0x48620001,
1213 0xc78, 0x47630001,
1214 0xc78, 0x46640001,
1215 0xc78, 0x45650001,
1216 0xc78, 0x44660001,
1217 0xc78, 0x43670001,
1218 0xc78, 0x42680001,
1219 0xc78, 0x41690001,
1220 0xc78, 0x406a0001,
1221 0xc78, 0x266b0001,
1222 0xc78, 0x256c0001,
1223 0xc78, 0x246d0001,
1224 0xc78, 0x236e0001,
1225 0xc78, 0x226f0001,
1226 0xc78, 0x21700001,
1227 0xc78, 0x20710001,
1228 0xc78, 0x06720001,
1229 0xc78, 0x05730001,
1230 0xc78, 0x04740001,
1231 0xc78, 0x03750001,
1232 0xc78, 0x02760001,
1233 0xc78, 0x01770001,
1234 0xc78, 0x00780001,
1235 0xc78, 0x00790001,
1236 0xc78, 0x007a0001,
1237 0xc78, 0x007b0001,
1238 0xc78, 0x007c0001,
1239 0xc78, 0x007d0001,
1240 0xc78, 0x007e0001,
1241 0xc78, 0x007f0001,
1242 0xc78, 0x3800001e,
1243 0xc78, 0x3801001e,
1244 0xc78, 0x3802001e,
1245 0xc78, 0x3803001e,
1246 0xc78, 0x3804001e,
1247 0xc78, 0x3805001e,
1248 0xc78, 0x3806001e,
1249 0xc78, 0x3807001e,
1250 0xc78, 0x3808001e,
1251 0xc78, 0x3c09001e,
1252 0xc78, 0x3e0a001e,
1253 0xc78, 0x400b001e,
1254 0xc78, 0x440c001e,
1255 0xc78, 0x480d001e,
1256 0xc78, 0x4c0e001e,
1257 0xc78, 0x500f001e,
1258 0xc78, 0x5210001e,
1259 0xc78, 0x5611001e,
1260 0xc78, 0x5a12001e,
1261 0xc78, 0x5e13001e,
1262 0xc78, 0x6014001e,
1263 0xc78, 0x6015001e,
1264 0xc78, 0x6016001e,
1265 0xc78, 0x6217001e,
1266 0xc78, 0x6218001e,
1267 0xc78, 0x6219001e,
1268 0xc78, 0x621a001e,
1269 0xc78, 0x621b001e,
1270 0xc78, 0x621c001e,
1271 0xc78, 0x621d001e,
1272 0xc78, 0x621e001e,
1273 0xc78, 0x621f001e,
1274};
1275
1276u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = {
1277 0x024, 0x0011800f,
1278 0x028, 0x00ffdb83,
1279 0x040, 0x000c0004,
1280 0x800, 0x80040000,
1281 0x804, 0x00000001,
1282 0x808, 0x0000fc00,
1283 0x80c, 0x0000000a,
1284 0x810, 0x10005388,
1285 0x814, 0x020c3d10,
1286 0x818, 0x02200385,
1287 0x81c, 0x00000000,
1288 0x820, 0x01000100,
1289 0x824, 0x00390204,
1290 0x828, 0x00000000,
1291 0x82c, 0x00000000,
1292 0x830, 0x00000000,
1293 0x834, 0x00000000,
1294 0x838, 0x00000000,
1295 0x83c, 0x00000000,
1296 0x840, 0x00010000,
1297 0x844, 0x00000000,
1298 0x848, 0x00000000,
1299 0x84c, 0x00000000,
1300 0x850, 0x00000000,
1301 0x854, 0x00000000,
1302 0x858, 0x569a569a,
1303 0x85c, 0x001b25a4,
1304 0x860, 0x66e60230,
1305 0x864, 0x061f0130,
1306 0x868, 0x00000000,
1307 0x86c, 0x20202000,
1308 0x870, 0x03000300,
1309 0x874, 0x22004000,
1310 0x878, 0x00000808,
1311 0x87c, 0x00ffc3f1,
1312 0x880, 0xc0083070,
1313 0x884, 0x000004d5,
1314 0x888, 0x00000000,
1315 0x88c, 0xccc000c0,
1316 0x890, 0x00000800,
1317 0x894, 0xfffffffe,
1318 0x898, 0x40302010,
1319 0x89c, 0x00706050,
1320 0x900, 0x00000000,
1321 0x904, 0x00000023,
1322 0x908, 0x00000000,
1323 0x90c, 0x81121111,
1324 0xa00, 0x00d047c8,
1325 0xa04, 0x80ff000c,
1326 0xa08, 0x8c838300,
1327 0xa0c, 0x2e68120f,
1328 0xa10, 0x9500bb78,
1329 0xa14, 0x11144028,
1330 0xa18, 0x00881117,
1331 0xa1c, 0x89140f00,
1332 0xa20, 0x15160000,
1333 0xa24, 0x070b0f12,
1334 0xa28, 0x00000104,
1335 0xa2c, 0x00d30000,
1336 0xa70, 0x101fbf00,
1337 0xa74, 0x00000007,
1338 0xc00, 0x48071d40,
1339 0xc04, 0x03a05611,
1340 0xc08, 0x000000e4,
1341 0xc0c, 0x6c6c6c6c,
1342 0xc10, 0x08800000,
1343 0xc14, 0x40000100,
1344 0xc18, 0x08800000,
1345 0xc1c, 0x40000100,
1346 0xc20, 0x00000000,
1347 0xc24, 0x00000000,
1348 0xc28, 0x00000000,
1349 0xc2c, 0x00000000,
1350 0xc30, 0x69e9ac44,
1351 0xc34, 0x469652cf,
1352 0xc38, 0x49795994,
1353 0xc3c, 0x0a97971c,
1354 0xc40, 0x1f7c403f,
1355 0xc44, 0x000100b7,
1356 0xc48, 0xec020107,
1357 0xc4c, 0x007f037f,
1358 0xc50, 0x6954342e,
1359 0xc54, 0x43bc0094,
1360 0xc58, 0x6954342f,
1361 0xc5c, 0x433c0094,
1362 0xc60, 0x00000000,
1363 0xc64, 0x5116848b,
1364 0xc68, 0x47c00bff,
1365 0xc6c, 0x00000036,
1366 0xc70, 0x2c46000d,
1367 0xc74, 0x018610db,
1368 0xc78, 0x0000001f,
1369 0xc7c, 0x00b91612,
1370 0xc80, 0x24000090,
1371 0xc84, 0x20f60000,
1372 0xc88, 0x24000090,
1373 0xc8c, 0x20200000,
1374 0xc90, 0x00121820,
1375 0xc94, 0x00000000,
1376 0xc98, 0x00121820,
1377 0xc9c, 0x00007f7f,
1378 0xca0, 0x00000000,
1379 0xca4, 0x00000080,
1380 0xca8, 0x00000000,
1381 0xcac, 0x00000000,
1382 0xcb0, 0x00000000,
1383 0xcb4, 0x00000000,
1384 0xcb8, 0x00000000,
1385 0xcbc, 0x28000000,
1386 0xcc0, 0x00000000,
1387 0xcc4, 0x00000000,
1388 0xcc8, 0x00000000,
1389 0xccc, 0x00000000,
1390 0xcd0, 0x00000000,
1391 0xcd4, 0x00000000,
1392 0xcd8, 0x64b22427,
1393 0xcdc, 0x00766932,
1394 0xce0, 0x00222222,
1395 0xce4, 0x00000000,
1396 0xce8, 0x37644302,
1397 0xcec, 0x2f97d40c,
1398 0xd00, 0x00080740,
1399 0xd04, 0x00020401,
1400 0xd08, 0x0000907f,
1401 0xd0c, 0x20010201,
1402 0xd10, 0xa0633333,
1403 0xd14, 0x3333bc43,
1404 0xd18, 0x7a8f5b6b,
1405 0xd2c, 0xcc979975,
1406 0xd30, 0x00000000,
1407 0xd34, 0x80608000,
1408 0xd38, 0x00000000,
1409 0xd3c, 0x00027293,
1410 0xd40, 0x00000000,
1411 0xd44, 0x00000000,
1412 0xd48, 0x00000000,
1413 0xd4c, 0x00000000,
1414 0xd50, 0x6437140a,
1415 0xd54, 0x00000000,
1416 0xd58, 0x00000000,
1417 0xd5c, 0x30032064,
1418 0xd60, 0x4653de68,
1419 0xd64, 0x04518a3c,
1420 0xd68, 0x00002101,
1421 0xd6c, 0x2a201c16,
1422 0xd70, 0x1812362e,
1423 0xd74, 0x322c2220,
1424 0xd78, 0x000e3c24,
1425 0xe00, 0x24242424,
1426 0xe04, 0x24242424,
1427 0xe08, 0x03902024,
1428 0xe10, 0x24242424,
1429 0xe14, 0x24242424,
1430 0xe18, 0x24242424,
1431 0xe1c, 0x24242424,
1432 0xe28, 0x00000000,
1433 0xe30, 0x1000dc1f,
1434 0xe34, 0x10008c1f,
1435 0xe38, 0x02140102,
1436 0xe3c, 0x681604c2,
1437 0xe40, 0x01007c00,
1438 0xe44, 0x01004800,
1439 0xe48, 0xfb000000,
1440 0xe4c, 0x000028d1,
1441 0xe50, 0x1000dc1f,
1442 0xe54, 0x10008c1f,
1443 0xe58, 0x02140102,
1444 0xe5c, 0x28160d05,
1445 0xe60, 0x00000008,
1446 0xe68, 0x001b25a4,
1447 0xe6c, 0x631b25a0,
1448 0xe70, 0x631b25a0,
1449 0xe74, 0x081b25a0,
1450 0xe78, 0x081b25a0,
1451 0xe7c, 0x081b25a0,
1452 0xe80, 0x081b25a0,
1453 0xe84, 0x631b25a0,
1454 0xe88, 0x081b25a0,
1455 0xe8c, 0x631b25a0,
1456 0xed0, 0x631b25a0,
1457 0xed4, 0x631b25a0,
1458 0xed8, 0x631b25a0,
1459 0xedc, 0x001b25a0,
1460 0xee0, 0x001b25a0,
1461 0xeec, 0x6b1b25a0,
1462 0xee8, 0x31555448,
1463 0xf14, 0x00000003,
1464 0xf4c, 0x00000000,
1465 0xf00, 0x00000300,
1466};
1467
1468u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = {
1469 0xe00, 0xffffffff, 0x06080808,
1470 0xe04, 0xffffffff, 0x00040406,
1471 0xe08, 0x0000ff00, 0x00000000,
1472 0x86c, 0xffffff00, 0x00000000,
1473 0xe10, 0xffffffff, 0x04060608,
1474 0xe14, 0xffffffff, 0x00020204,
1475 0xe18, 0xffffffff, 0x04060608,
1476 0xe1c, 0xffffffff, 0x00020204,
1477 0x830, 0xffffffff, 0x06080808,
1478 0x834, 0xffffffff, 0x00040406,
1479 0x838, 0xffffff00, 0x00000000,
1480 0x86c, 0x000000ff, 0x00000000,
1481 0x83c, 0xffffffff, 0x04060608,
1482 0x848, 0xffffffff, 0x00020204,
1483 0x84c, 0xffffffff, 0x04060608,
1484 0x868, 0xffffffff, 0x00020204,
1485 0xe00, 0xffffffff, 0x00000000,
1486 0xe04, 0xffffffff, 0x00000000,
1487 0xe08, 0x0000ff00, 0x00000000,
1488 0x86c, 0xffffff00, 0x00000000,
1489 0xe10, 0xffffffff, 0x00000000,
1490 0xe14, 0xffffffff, 0x00000000,
1491 0xe18, 0xffffffff, 0x00000000,
1492 0xe1c, 0xffffffff, 0x00000000,
1493 0x830, 0xffffffff, 0x00000000,
1494 0x834, 0xffffffff, 0x00000000,
1495 0x838, 0xffffff00, 0x00000000,
1496 0x86c, 0x000000ff, 0x00000000,
1497 0x83c, 0xffffffff, 0x00000000,
1498 0x848, 0xffffffff, 0x00000000,
1499 0x84c, 0xffffffff, 0x00000000,
1500 0x868, 0xffffffff, 0x00000000,
1501 0xe00, 0xffffffff, 0x00000000,
1502 0xe04, 0xffffffff, 0x00000000,
1503 0xe08, 0x0000ff00, 0x00000000,
1504 0x86c, 0xffffff00, 0x00000000,
1505 0xe10, 0xffffffff, 0x00000000,
1506 0xe14, 0xffffffff, 0x00000000,
1507 0xe18, 0xffffffff, 0x00000000,
1508 0xe1c, 0xffffffff, 0x00000000,
1509 0x830, 0xffffffff, 0x00000000,
1510 0x834, 0xffffffff, 0x00000000,
1511 0x838, 0xffffff00, 0x00000000,
1512 0x86c, 0x000000ff, 0x00000000,
1513 0x83c, 0xffffffff, 0x00000000,
1514 0x848, 0xffffffff, 0x00000000,
1515 0x84c, 0xffffffff, 0x00000000,
1516 0x868, 0xffffffff, 0x00000000,
1517 0xe00, 0xffffffff, 0x00000000,
1518 0xe04, 0xffffffff, 0x00000000,
1519 0xe08, 0x0000ff00, 0x00000000,
1520 0x86c, 0xffffff00, 0x00000000,
1521 0xe10, 0xffffffff, 0x00000000,
1522 0xe14, 0xffffffff, 0x00000000,
1523 0xe18, 0xffffffff, 0x00000000,
1524 0xe1c, 0xffffffff, 0x00000000,
1525 0x830, 0xffffffff, 0x00000000,
1526 0x834, 0xffffffff, 0x00000000,
1527 0x838, 0xffffff00, 0x00000000,
1528 0x86c, 0x000000ff, 0x00000000,
1529 0x83c, 0xffffffff, 0x00000000,
1530 0x848, 0xffffffff, 0x00000000,
1531 0x84c, 0xffffffff, 0x00000000,
1532 0x868, 0xffffffff, 0x00000000,
1533 0xe00, 0xffffffff, 0x00000000,
1534 0xe04, 0xffffffff, 0x00000000,
1535 0xe08, 0x0000ff00, 0x00000000,
1536 0x86c, 0xffffff00, 0x00000000,
1537 0xe10, 0xffffffff, 0x00000000,
1538 0xe14, 0xffffffff, 0x00000000,
1539 0xe18, 0xffffffff, 0x00000000,
1540 0xe1c, 0xffffffff, 0x00000000,
1541 0x830, 0xffffffff, 0x00000000,
1542 0x834, 0xffffffff, 0x00000000,
1543 0x838, 0xffffff00, 0x00000000,
1544 0x86c, 0x000000ff, 0x00000000,
1545 0x83c, 0xffffffff, 0x00000000,
1546 0x848, 0xffffffff, 0x00000000,
1547 0x84c, 0xffffffff, 0x00000000,
1548 0x868, 0xffffffff, 0x00000000,
1549 0xe00, 0xffffffff, 0x00000000,
1550 0xe04, 0xffffffff, 0x00000000,
1551 0xe08, 0x0000ff00, 0x00000000,
1552 0x86c, 0xffffff00, 0x00000000,
1553 0xe10, 0xffffffff, 0x00000000,
1554 0xe14, 0xffffffff, 0x00000000,
1555 0xe18, 0xffffffff, 0x00000000,
1556 0xe1c, 0xffffffff, 0x00000000,
1557 0x830, 0xffffffff, 0x00000000,
1558 0x834, 0xffffffff, 0x00000000,
1559 0x838, 0xffffff00, 0x00000000,
1560 0x86c, 0x000000ff, 0x00000000,
1561 0x83c, 0xffffffff, 0x00000000,
1562 0x848, 0xffffffff, 0x00000000,
1563 0x84c, 0xffffffff, 0x00000000,
1564 0x868, 0xffffffff, 0x00000000,
1565 0xe00, 0xffffffff, 0x00000000,
1566 0xe04, 0xffffffff, 0x00000000,
1567 0xe08, 0x0000ff00, 0x00000000,
1568 0x86c, 0xffffff00, 0x00000000,
1569 0xe10, 0xffffffff, 0x00000000,
1570 0xe14, 0xffffffff, 0x00000000,
1571 0xe18, 0xffffffff, 0x00000000,
1572 0xe1c, 0xffffffff, 0x00000000,
1573 0x830, 0xffffffff, 0x00000000,
1574 0x834, 0xffffffff, 0x00000000,
1575 0x838, 0xffffff00, 0x00000000,
1576 0x86c, 0x000000ff, 0x00000000,
1577 0x83c, 0xffffffff, 0x00000000,
1578 0x848, 0xffffffff, 0x00000000,
1579 0x84c, 0xffffffff, 0x00000000,
1580 0x868, 0xffffffff, 0x00000000,
1581};
1582
/*
 * RF (radio path A) initialisation table for the 1T high-power (HP)
 * hardware variant. Entries are {RF register offset, value} pairs
 * (282 words = 141 pairs), written in order during RF init.
 * Generated from vendor data — do not edit values by hand.
 */
u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = {
	0x000, 0x00030159,
	0x001, 0x00031284,
	0x002, 0x00098000,
	0x003, 0x00018c63,
	0x004, 0x000210e7,
	0x009, 0x0002044f,
	0x00a, 0x0001adb0,
	0x00b, 0x00054867,
	0x00c, 0x0008992e,
	0x00d, 0x0000e529,
	0x00e, 0x00039ce7,
	0x00f, 0x00000451,
	0x019, 0x00000000,
	0x01a, 0x00000255,
	0x01b, 0x00060a00,
	0x01c, 0x000fc378,
	0x01d, 0x000a1250,
	0x01e, 0x0004445f,
	0x01f, 0x00080001,
	0x020, 0x0000b614,
	0x021, 0x0006c000,
	0x022, 0x0000083c,
	0x023, 0x00001558,
	0x024, 0x00000060,
	0x025, 0x00000483,
	0x026, 0x0004f000,
	0x027, 0x000ec7d9,
	0x028, 0x000977c0,
	0x029, 0x00004783,
	0x02a, 0x00000001,
	0x02b, 0x00021334,
	0x02a, 0x00000000,
	0x02b, 0x00000054,
	0x02a, 0x00000001,
	0x02b, 0x00000808,
	0x02b, 0x00053333,
	0x02c, 0x0000000c,
	0x02a, 0x00000002,
	0x02b, 0x00000808,
	0x02b, 0x0005b333,
	0x02c, 0x0000000d,
	0x02a, 0x00000003,
	0x02b, 0x00000808,
	0x02b, 0x00063333,
	0x02c, 0x0000000d,
	0x02a, 0x00000004,
	0x02b, 0x00000808,
	0x02b, 0x0006b333,
	0x02c, 0x0000000d,
	0x02a, 0x00000005,
	0x02b, 0x00000808,
	0x02b, 0x00073333,
	0x02c, 0x0000000d,
	0x02a, 0x00000006,
	0x02b, 0x00000709,
	0x02b, 0x0005b333,
	0x02c, 0x0000000d,
	0x02a, 0x00000007,
	0x02b, 0x00000709,
	0x02b, 0x00063333,
	0x02c, 0x0000000d,
	0x02a, 0x00000008,
	0x02b, 0x0000060a,
	0x02b, 0x0004b333,
	0x02c, 0x0000000d,
	0x02a, 0x00000009,
	0x02b, 0x0000060a,
	0x02b, 0x00053333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000a,
	0x02b, 0x0000060a,
	0x02b, 0x0005b333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000b,
	0x02b, 0x0000060a,
	0x02b, 0x00063333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000c,
	0x02b, 0x0000060a,
	0x02b, 0x0006b333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000d,
	0x02b, 0x0000060a,
	0x02b, 0x00073333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000e,
	0x02b, 0x0000050b,
	0x02b, 0x00066666,
	0x02c, 0x0000001a,
	0x02a, 0x000e0000,
	0x010, 0x0004000f,
	0x011, 0x000e31fc,
	0x010, 0x0006000f,
	0x011, 0x000ff9f8,
	0x010, 0x0002000f,
	0x011, 0x000203f9,
	0x010, 0x0003000f,
	0x011, 0x000ff500,
	0x010, 0x00000000,
	0x011, 0x00000000,
	0x010, 0x0008000f,
	0x011, 0x0003f100,
	0x010, 0x0009000f,
	0x011, 0x00023100,
	0x012, 0x000d8000,
	0x012, 0x00090000,
	0x012, 0x00051000,
	0x012, 0x00012000,
	0x013, 0x00028fb4,
	0x013, 0x00024fa8,
	0x013, 0x000207a4,
	0x013, 0x0001c798,
	0x013, 0x000183a4,
	0x013, 0x00014398,
	0x013, 0x000101a4,
	0x013, 0x0000c198,
	0x013, 0x000080a4,
	0x013, 0x00004098,
	0x013, 0x00000000,
	0x014, 0x0001944c,
	0x014, 0x00059444,
	0x014, 0x0009944c,
	0x014, 0x000d9444,
	0x015, 0x0000f405,
	0x015, 0x0004f405,
	0x015, 0x0008f405,
	0x015, 0x000cf405,
	0x016, 0x000e0330,
	0x016, 0x000a0330,
	0x016, 0x00060330,
	0x016, 0x00020330,
	0x000, 0x00010159,
	0x018, 0x0000f401,
	0x0fe, 0x00000000,
	0x0fe, 0x00000000,
	0x01f, 0x00080003,
	0x0fe, 0x00000000,
	0x0fe, 0x00000000,
	0x01e, 0x00044457,
	0x01f, 0x00080000,
	0x000, 0x00030159,
};
1726
/*
 * AGC (automatic gain control) table for the 1T high-power (HP)
 * variant. Entries are {register, value} pairs (320 words = 160 pairs);
 * every write targets BB register 0xc78.
 * Generated from vendor data — do not edit values by hand.
 */
u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = {
	0xc78, 0x7b000001,
	0xc78, 0x7b010001,
	0xc78, 0x7b020001,
	0xc78, 0x7b030001,
	0xc78, 0x7b040001,
	0xc78, 0x7b050001,
	0xc78, 0x7b060001,
	0xc78, 0x7b070001,
	0xc78, 0x7b080001,
	0xc78, 0x7a090001,
	0xc78, 0x790a0001,
	0xc78, 0x780b0001,
	0xc78, 0x770c0001,
	0xc78, 0x760d0001,
	0xc78, 0x750e0001,
	0xc78, 0x740f0001,
	0xc78, 0x73100001,
	0xc78, 0x72110001,
	0xc78, 0x71120001,
	0xc78, 0x70130001,
	0xc78, 0x6f140001,
	0xc78, 0x6e150001,
	0xc78, 0x6d160001,
	0xc78, 0x6c170001,
	0xc78, 0x6b180001,
	0xc78, 0x6a190001,
	0xc78, 0x691a0001,
	0xc78, 0x681b0001,
	0xc78, 0x671c0001,
	0xc78, 0x661d0001,
	0xc78, 0x651e0001,
	0xc78, 0x641f0001,
	0xc78, 0x63200001,
	0xc78, 0x62210001,
	0xc78, 0x61220001,
	0xc78, 0x60230001,
	0xc78, 0x46240001,
	0xc78, 0x45250001,
	0xc78, 0x44260001,
	0xc78, 0x43270001,
	0xc78, 0x42280001,
	0xc78, 0x41290001,
	0xc78, 0x402a0001,
	0xc78, 0x262b0001,
	0xc78, 0x252c0001,
	0xc78, 0x242d0001,
	0xc78, 0x232e0001,
	0xc78, 0x222f0001,
	0xc78, 0x21300001,
	0xc78, 0x20310001,
	0xc78, 0x06320001,
	0xc78, 0x05330001,
	0xc78, 0x04340001,
	0xc78, 0x03350001,
	0xc78, 0x02360001,
	0xc78, 0x01370001,
	0xc78, 0x00380001,
	0xc78, 0x00390001,
	0xc78, 0x003a0001,
	0xc78, 0x003b0001,
	0xc78, 0x003c0001,
	0xc78, 0x003d0001,
	0xc78, 0x003e0001,
	0xc78, 0x003f0001,
	0xc78, 0x7b400001,
	0xc78, 0x7b410001,
	0xc78, 0x7b420001,
	0xc78, 0x7b430001,
	0xc78, 0x7b440001,
	0xc78, 0x7b450001,
	0xc78, 0x7b460001,
	0xc78, 0x7b470001,
	0xc78, 0x7b480001,
	0xc78, 0x7a490001,
	0xc78, 0x794a0001,
	0xc78, 0x784b0001,
	0xc78, 0x774c0001,
	0xc78, 0x764d0001,
	0xc78, 0x754e0001,
	0xc78, 0x744f0001,
	0xc78, 0x73500001,
	0xc78, 0x72510001,
	0xc78, 0x71520001,
	0xc78, 0x70530001,
	0xc78, 0x6f540001,
	0xc78, 0x6e550001,
	0xc78, 0x6d560001,
	0xc78, 0x6c570001,
	0xc78, 0x6b580001,
	0xc78, 0x6a590001,
	0xc78, 0x695a0001,
	0xc78, 0x685b0001,
	0xc78, 0x675c0001,
	0xc78, 0x665d0001,
	0xc78, 0x655e0001,
	0xc78, 0x645f0001,
	0xc78, 0x63600001,
	0xc78, 0x62610001,
	0xc78, 0x61620001,
	0xc78, 0x60630001,
	0xc78, 0x46640001,
	0xc78, 0x45650001,
	0xc78, 0x44660001,
	0xc78, 0x43670001,
	0xc78, 0x42680001,
	0xc78, 0x41690001,
	0xc78, 0x406a0001,
	0xc78, 0x266b0001,
	0xc78, 0x256c0001,
	0xc78, 0x246d0001,
	0xc78, 0x236e0001,
	0xc78, 0x226f0001,
	0xc78, 0x21700001,
	0xc78, 0x20710001,
	0xc78, 0x06720001,
	0xc78, 0x05730001,
	0xc78, 0x04740001,
	0xc78, 0x03750001,
	0xc78, 0x02760001,
	0xc78, 0x01770001,
	0xc78, 0x00780001,
	0xc78, 0x00790001,
	0xc78, 0x007a0001,
	0xc78, 0x007b0001,
	0xc78, 0x007c0001,
	0xc78, 0x007d0001,
	0xc78, 0x007e0001,
	0xc78, 0x007f0001,
	0xc78, 0x3800001e,
	0xc78, 0x3801001e,
	0xc78, 0x3802001e,
	0xc78, 0x3803001e,
	0xc78, 0x3804001e,
	0xc78, 0x3805001e,
	0xc78, 0x3806001e,
	0xc78, 0x3807001e,
	0xc78, 0x3808001e,
	0xc78, 0x3c09001e,
	0xc78, 0x3e0a001e,
	0xc78, 0x400b001e,
	0xc78, 0x440c001e,
	0xc78, 0x480d001e,
	0xc78, 0x4c0e001e,
	0xc78, 0x500f001e,
	0xc78, 0x5210001e,
	0xc78, 0x5611001e,
	0xc78, 0x5a12001e,
	0xc78, 0x5e13001e,
	0xc78, 0x6014001e,
	0xc78, 0x6015001e,
	0xc78, 0x6016001e,
	0xc78, 0x6217001e,
	0xc78, 0x6218001e,
	0xc78, 0x6219001e,
	0xc78, 0x621a001e,
	0xc78, 0x621b001e,
	0xc78, 0x621c001e,
	0xc78, 0x621d001e,
	0xc78, 0x621e001e,
	0xc78, 0x621f001e,
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
new file mode 100644
index 000000000000..c3d5cd826cfa
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
@@ -0,0 +1,71 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
#ifndef __RTL92CU_TABLE__H_
#define __RTL92CU_TABLE__H_

#include <linux/types.h>

/* PHY baseband register initialisation tables. */
#define RTL8192CUPHY_REG_2TARRAY_LENGTH 374
extern u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH];
#define RTL8192CUPHY_REG_1TARRAY_LENGTH 374
extern u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH];

#define RTL8192CUPHY_REG_ARRAY_PGLENGTH 336
extern u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH];

/* RF register initialisation tables for radio paths A/B (2T/1T). */
#define RTL8192CURADIOA_2TARRAYLENGTH 282
extern u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH];
#define RTL8192CURADIOB_2TARRAYLENGTH 78
extern u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH];
#define RTL8192CURADIOA_1TARRAYLENGTH 282
extern u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH];
#define RTL8192CURADIOB_1TARRAYLENGTH 1
extern u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH];

/* MAC register initialisation table. */
#define RTL8192CUMAC_2T_ARRAYLENGTH 172
extern u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH];

/* AGC (gain) tables. */
#define RTL8192CUAGCTAB_2TARRAYLENGTH 320
extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH];
#define RTL8192CUAGCTAB_1TARRAYLENGTH 320
extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH];

/* High-power (HP) hardware variants of the above tables. */
#define RTL8192CUPHY_REG_1T_HPArrayLength 378
extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength];

#define RTL8192CUPHY_REG_Array_PG_HPLength 336
extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength];

#define RTL8192CURadioA_1T_HPArrayLength 282
extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength];
#define RTL8192CUAGCTAB_1T_HPArrayLength 320
extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength];

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
new file mode 100644
index 000000000000..d0b0d43b9a6d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -0,0 +1,687 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../usb.h"
32#include "../ps.h"
33#include "../base.h"
34#include "reg.h"
35#include "def.h"
36#include "phy.h"
37#include "rf.h"
38#include "dm.h"
39#include "mac.h"
40#include "trx.h"
41
42static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
43{
44 u8 ep_cfg, txqsele;
45 u8 ep_nums = 0;
46
47 struct rtl_priv *rtlpriv = rtl_priv(hw);
48 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
49 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
50
51 rtlusb->out_queue_sel = 0;
52 ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL);
53 ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT;
54 switch (ep_cfg) {
55 case 0: /* 2 bulk OUT, 1 bulk IN */
56 case 3:
57 rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_LQ;
58 ep_nums = 2;
59 break;
60 case 1: /* 1 bulk IN/OUT => map all endpoint to Low queue */
61 case 2: /* 1 bulk IN, 1 bulk OUT => map all endpoint to High queue */
62 txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS);
63 if (txqsele & 0x0F) /* /map all endpoint to High queue */
64 rtlusb->out_queue_sel = TX_SELE_HQ;
65 else if (txqsele&0xF0) /* map all endpoint to Low queue */
66 rtlusb->out_queue_sel = TX_SELE_LQ;
67 ep_nums = 1;
68 break;
69 default:
70 break;
71 }
72 return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
73}
74
75static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
76{
77 u8 ep_cfg;
78 u8 ep_nums = 0;
79
80 struct rtl_priv *rtlpriv = rtl_priv(hw);
81 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
82 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
83
84 rtlusb->out_queue_sel = 0;
85 /* Normal and High queue */
86 ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1));
87 if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
88 rtlusb->out_queue_sel |= TX_SELE_HQ;
89 ep_nums++;
90 }
91 if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
92 rtlusb->out_queue_sel |= TX_SELE_NQ;
93 ep_nums++;
94 }
95 /* Low queue */
96 ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2));
97 if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
98 rtlusb->out_queue_sel |= TX_SELE_LQ;
99 ep_nums++;
100 }
101 return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
102}
103
104static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
105 bool bwificfg, struct rtl_ep_map *ep_map)
106{
107 struct rtl_priv *rtlpriv = rtl_priv(hw);
108
109 if (bwificfg) { /* for WMM */
110 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
111 ("USB Chip-B & WMM Setting.....\n"));
112 ep_map->ep_mapping[RTL_TXQ_BE] = 2;
113 ep_map->ep_mapping[RTL_TXQ_BK] = 3;
114 ep_map->ep_mapping[RTL_TXQ_VI] = 3;
115 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
116 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
117 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
118 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
119 } else { /* typical setting */
120 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
121 ("USB typical Setting.....\n"));
122 ep_map->ep_mapping[RTL_TXQ_BE] = 3;
123 ep_map->ep_mapping[RTL_TXQ_BK] = 3;
124 ep_map->ep_mapping[RTL_TXQ_VI] = 2;
125 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
126 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
127 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
128 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
129 }
130}
131
132static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
133 struct rtl_ep_map *ep_map)
134{
135 struct rtl_priv *rtlpriv = rtl_priv(hw);
136 if (bwificfg) { /* for WMM */
137 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
138 ("USB 3EP Setting for WMM.....\n"));
139 ep_map->ep_mapping[RTL_TXQ_BE] = 5;
140 ep_map->ep_mapping[RTL_TXQ_BK] = 3;
141 ep_map->ep_mapping[RTL_TXQ_VI] = 3;
142 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
143 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
144 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
145 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
146 } else { /* typical setting */
147 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
148 ("USB 3EP Setting for typical.....\n"));
149 ep_map->ep_mapping[RTL_TXQ_BE] = 5;
150 ep_map->ep_mapping[RTL_TXQ_BK] = 5;
151 ep_map->ep_mapping[RTL_TXQ_VI] = 3;
152 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
153 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
154 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
155 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
156 }
157}
158
159static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
160{
161 ep_map->ep_mapping[RTL_TXQ_BE] = 2;
162 ep_map->ep_mapping[RTL_TXQ_BK] = 2;
163 ep_map->ep_mapping[RTL_TXQ_VI] = 2;
164 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
165 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
166 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
167 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
168}
169static int _out_ep_mapping(struct ieee80211_hw *hw)
170{
171 int err = 0;
172 bool bIsChipN, bwificfg = false;
173 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
174 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
175 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
176 struct rtl_ep_map *ep_map = &(rtlusb->ep_map);
177
178 bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
179 switch (rtlusb->out_ep_nums) {
180 case 2:
181 _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
182 break;
183 case 3:
184 /* Test chip doesn't support three out EPs. */
185 if (!bIsChipN) {
186 err = -EINVAL;
187 goto err_out;
188 }
189 _ThreeOutEpMapping(hw, bIsChipN, ep_map);
190 break;
191 case 1:
192 _OneOutEpMapping(hw, ep_map);
193 break;
194 default:
195 err = -EINVAL;
196 break;
197 }
198err_out:
199 return err;
200
201}
202/* endpoint mapping */
203int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
204{
205 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
206 int error = 0;
207 if (likely(IS_NORMAL_CHIP(rtlhal->version)))
208 error = _ConfigVerNOutEP(hw);
209 else
210 error = _ConfigVerTOutEP(hw);
211 if (error)
212 goto err_out;
213 error = _out_ep_mapping(hw);
214 if (error)
215 goto err_out;
216err_out:
217 return error;
218}
219
220u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
221{
222 u16 hw_queue_index;
223
224 if (unlikely(ieee80211_is_beacon(fc))) {
225 hw_queue_index = RTL_TXQ_BCN;
226 goto out;
227 }
228 if (ieee80211_is_mgmt(fc)) {
229 hw_queue_index = RTL_TXQ_MGT;
230 goto out;
231 }
232 switch (mac80211_queue_index) {
233 case 0:
234 hw_queue_index = RTL_TXQ_VO;
235 break;
236 case 1:
237 hw_queue_index = RTL_TXQ_VI;
238 break;
239 case 2:
240 hw_queue_index = RTL_TXQ_BE;
241 break;
242 case 3:
243 hw_queue_index = RTL_TXQ_BK;
244 break;
245 default:
246 hw_queue_index = RTL_TXQ_BE;
247 RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
248 mac80211_queue_index));
249 break;
250 }
251out:
252 return hw_queue_index;
253}
254
255static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
256 __le16 fc, u16 mac80211_queue_index)
257{
258 enum rtl_desc_qsel qsel;
259 struct rtl_priv *rtlpriv = rtl_priv(hw);
260
261 if (unlikely(ieee80211_is_beacon(fc))) {
262 qsel = QSLT_BEACON;
263 goto out;
264 }
265 if (ieee80211_is_mgmt(fc)) {
266 qsel = QSLT_MGNT;
267 goto out;
268 }
269 switch (mac80211_queue_index) {
270 case 0: /* VO */
271 qsel = QSLT_VO;
272 RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
273 ("VO queue, set qsel = 0x%x\n", QSLT_VO));
274 break;
275 case 1: /* VI */
276 qsel = QSLT_VI;
277 RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
278 ("VI queue, set qsel = 0x%x\n", QSLT_VI));
279 break;
280 case 3: /* BK */
281 qsel = QSLT_BK;
282 RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
283 ("BK queue, set qsel = 0x%x\n", QSLT_BK));
284 break;
285 case 2: /* BE */
286 default:
287 qsel = QSLT_BE;
288 RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
289 ("BE queue, set qsel = 0x%x\n", QSLT_BE));
290 break;
291 }
292out:
293 return qsel;
294}
295
296/* =============================================================== */
297
298/*----------------------------------------------------------------------
299 *
300 * Rx handler
301 *
302 *---------------------------------------------------------------------- */
303bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
304 struct rtl_stats *stats,
305 struct ieee80211_rx_status *rx_status,
306 u8 *p_desc, struct sk_buff *skb)
307{
308 struct rx_fwinfo_92c *p_drvinfo;
309 struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
310 u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
311
312 stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
313 stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) *
314 RX_DRV_INFO_SIZE_UNIT;
315 stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
316 stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
317 stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
318 stats->hwerror = (stats->crc | stats->icv);
319 stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
320 stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc);
321 stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
322 stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
323 stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
324 && (GET_RX_DESC_FAGGR(pdesc) == 1));
325 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
326 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
327 rx_status->freq = hw->conf.channel->center_freq;
328 rx_status->band = hw->conf.channel->band;
329 if (GET_RX_DESC_CRC32(pdesc))
330 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
331 if (!GET_RX_DESC_SWDEC(pdesc))
332 rx_status->flag |= RX_FLAG_DECRYPTED;
333 if (GET_RX_DESC_BW(pdesc))
334 rx_status->flag |= RX_FLAG_40MHZ;
335 if (GET_RX_DESC_RX_HT(pdesc))
336 rx_status->flag |= RX_FLAG_HT;
337 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
338 if (stats->decrypted)
339 rx_status->flag |= RX_FLAG_DECRYPTED;
340 rx_status->rate_idx = _rtl92c_rate_mapping(hw,
341 (bool)GET_RX_DESC_RX_HT(pdesc),
342 (u8)GET_RX_DESC_RX_MCS(pdesc),
343 (bool)GET_RX_DESC_PAGGR(pdesc));
344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
345 if (phystatus == true) {
346 p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
347 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
348 p_drvinfo);
349 }
350 /*rx_status->qual = stats->signal; */
351 rx_status->signal = stats->rssi + 10;
352 /*rx_status->noise = -stats->noise; */
353 return true;
354}
355
356#define RTL_RX_DRV_INFO_UNIT 8
357
358static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
359{
360 struct ieee80211_rx_status *rx_status =
361 (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb);
362 u32 skb_len, pkt_len, drvinfo_len;
363 struct rtl_priv *rtlpriv = rtl_priv(hw);
364 u8 *rxdesc;
365 struct rtl_stats stats = {
366 .signal = 0,
367 .noise = -98,
368 .rate = 0,
369 };
370 struct rx_fwinfo_92c *p_drvinfo;
371 bool bv;
372 __le16 fc;
373 struct ieee80211_hdr *hdr;
374
375 memset(rx_status, 0, sizeof(rx_status));
376 rxdesc = skb->data;
377 skb_len = skb->len;
378 drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT);
379 pkt_len = GET_RX_DESC_PKT_LEN(rxdesc);
380 /* TODO: Error recovery. drop this skb or something. */
381 WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len));
382 stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc);
383 stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) *
384 RX_DRV_INFO_SIZE_UNIT;
385 stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03);
386 stats.icv = (u16) GET_RX_DESC_ICV(rxdesc);
387 stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc);
388 stats.hwerror = (stats.crc | stats.icv);
389 stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc);
390 stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc);
391 stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc);
392 stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1)
393 && (GET_RX_DESC_FAGGR(rxdesc) == 1));
394 stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
395 stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
396 /* TODO: is center_freq changed when doing scan? */
397 /* TODO: Shall we add protection or just skip those two step? */
398 rx_status->freq = hw->conf.channel->center_freq;
399 rx_status->band = hw->conf.channel->band;
400 if (GET_RX_DESC_CRC32(rxdesc))
401 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
402 if (!GET_RX_DESC_SWDEC(rxdesc))
403 rx_status->flag |= RX_FLAG_DECRYPTED;
404 if (GET_RX_DESC_BW(rxdesc))
405 rx_status->flag |= RX_FLAG_40MHZ;
406 if (GET_RX_DESC_RX_HT(rxdesc))
407 rx_status->flag |= RX_FLAG_HT;
408 /* Data rate */
409 rx_status->rate_idx = _rtl92c_rate_mapping(hw,
410 (bool)GET_RX_DESC_RX_HT(rxdesc),
411 (u8)GET_RX_DESC_RX_MCS(rxdesc),
412 (bool)GET_RX_DESC_PAGGR(rxdesc)
413 );
414 /* There is a phy status after this rx descriptor. */
415 if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
416 p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
417 rtl92c_translate_rx_signal_stuff(hw, skb, &stats,
418 (struct rx_desc_92c *)rxdesc, p_drvinfo);
419 }
420 skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE));
421 hdr = (struct ieee80211_hdr *)(skb->data);
422 fc = hdr->frame_control;
423 bv = ieee80211_is_probe_resp(fc);
424 if (bv)
425 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
426 ("Got probe response frame.\n"));
427 if (ieee80211_is_beacon(fc))
428 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
429 ("Got beacon frame.\n"));
430 if (ieee80211_is_data(fc))
431 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Got data frame.\n"));
432 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
433 ("Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:"
434 "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1],
435 (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4],
436 (u32)hdr->addr1[5]));
437 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
438 ieee80211_rx_irqsafe(hw, skb);
439}
440
/* USB RX completion entry point: process a single received frame. */
void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	_rtl_rx_process(hw, skb);
}
445
/*
 * RX segregation hook for the USB core. Intentionally a no-op:
 * presumably this driver receives one frame per URB and never needs to
 * split an aggregate into @skb_list — confirm against the USB core's
 * calling convention.
 */
void rtl8192c_rx_segregate_hdl(
	struct ieee80211_hw *hw,
	struct sk_buff *skb,
	struct sk_buff_head *skb_list)
{
}
452
453/*----------------------------------------------------------------------
454 *
455 * Tx handler
456 *
457 *---------------------------------------------------------------------- */
/*
 * Per-skb TX cleanup hook. Intentionally empty: nothing extra to
 * release beyond what the USB core frees itself.
 */
void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb)
{
}
461
/*
 * Post-transmit hook invoked after a TX URB completes.
 * Always reports success; no per-URB post-processing is required.
 */
int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
			 struct sk_buff *skb)
{
	return 0;
}
467
/*
 * TX aggregation hook: no real aggregation is performed — simply
 * dequeue and return the next skb from @list (NULL when empty).
 */
struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw,
					   struct sk_buff_head *list)
{
	return skb_dequeue(list);
}
473
474/*======================================== trx ===============================*/
475
/* Mark a TX descriptor as a complete single-segment packet owned by HW. */
static void _rtl_fill_usb_tx_desc(u8 *txdesc)
{
	SET_TX_DESC_OWN(txdesc, 1);
	SET_TX_DESC_LAST_SEG(txdesc, 1);
	SET_TX_DESC_FIRST_SEG(txdesc, 1);
}
482/**
483 * For HW recovery information
484 */
485static void _rtl_tx_desc_checksum(u8 *txdesc)
486{
487 u16 *ptr = (u16 *)txdesc;
488 u16 checksum = 0;
489 u32 index;
490
491 /* Clear first */
492 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
493 for (index = 0; index < 16; index++)
494 checksum = checksum ^ (*(ptr + index));
495 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
496}
497
/*
 * Build the hardware TX descriptor that is pushed onto the front of
 * @skb before it is submitted to the USB TX path.
 *
 * NOTE(review): qc/tid are derived unconditionally via
 * ieee80211_get_qos_ctl() — presumably only meaningful for QoS data
 * frames; confirm non-QoS frames cannot mis-index mac->tids[].
 */
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
			  struct ieee80211_hdr *hdr, u8 *pdesc_tx,
			  struct ieee80211_tx_info *info, struct sk_buff *skb,
			  unsigned int queue_index)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	bool defaultadapter = true;
	struct ieee80211_sta *sta;
	struct rtl_tcb_desc tcb_desc;
	u8 *qc = ieee80211_get_qos_ctl(hdr);
	u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
	u16 seq_number;
	__le16 fc = hdr->frame_control;
	u8 rate_flag = info->control.rates[0].flags;
	u16 pktlen = skb->len;
	enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc,
						skb_get_queue_mapping(skb));
	u8 *txdesc;

	seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
	rtl_get_tcb_desc(hw, info, skb, &tcb_desc);
	/* The descriptor is prepended to the frame in the skb headroom. */
	txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
	memset(txdesc, 0, RTL_TX_HEADER_SIZE);
	SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
	SET_TX_DESC_LINIP(txdesc, 0);
	SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET);
	SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE);
	SET_TX_DESC_TX_RATE(txdesc, tcb_desc.hw_rate);
	if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble)
		SET_TX_DESC_DATA_SHORTGI(txdesc, 1);
	/* Enable HW aggregation only when a BA session is running. */
	if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
	    info->flags & IEEE80211_TX_CTL_AMPDU) {
		SET_TX_DESC_AGG_ENABLE(txdesc, 1);
		SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14);
	} else {
		SET_TX_DESC_AGG_BREAK(txdesc, 1);
	}
	SET_TX_DESC_SEQ(txdesc, seq_number);
	/* RTS vs. CTS-to-self protection selection. */
	SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable &&
			       !tcb_desc.cts_enable) ? 1 : 0));
	SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable ||
				  tcb_desc.cts_enable) ? 1 : 0));
	SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc.cts_enable) ? 1 : 0));
	SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
	SET_TX_DESC_RTS_RATE(txdesc, tcb_desc.rts_rate);
	SET_TX_DESC_RTS_BW(txdesc, 0);
	SET_TX_DESC_RTS_SC(txdesc, tcb_desc.rts_sc);
	/* Legacy rates use short preamble; HT rates use short GI. */
	SET_TX_DESC_RTS_SHORT(txdesc,
			      ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
			       (tcb_desc.rts_use_shortpreamble ? 1 : 0)
			       : (tcb_desc.rts_use_shortgi ? 1 : 0)));
	if (mac->bw_40) {
		if (tcb_desc.packet_bw) {
			SET_TX_DESC_DATA_BW(txdesc, 1);
			SET_TX_DESC_DATA_SC(txdesc, 3);
		} else {
			SET_TX_DESC_DATA_BW(txdesc, 0);
			if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
				SET_TX_DESC_DATA_SC(txdesc,
						    mac->cur_40_prime_sc);
		}
	} else {
		SET_TX_DESC_DATA_BW(txdesc, 0);
		SET_TX_DESC_DATA_SC(txdesc, 0);
	}
	/* Copy the peer's AMPDU density while holding the RCU read lock. */
	rcu_read_lock();
	sta = ieee80211_find_sta(mac->vif, mac->bssid);
	if (sta) {
		u8 ampdu_density = sta->ht_cap.ampdu_density;
		SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
	}
	rcu_read_unlock();
	if (info->control.hw_key) {
		struct ieee80211_key_conf *keyconf = info->control.hw_key;
		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			SET_TX_DESC_SEC_TYPE(txdesc, 0x1);
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			SET_TX_DESC_SEC_TYPE(txdesc, 0x3);
			break;
		default:
			SET_TX_DESC_SEC_TYPE(txdesc, 0x0);
			break;
		}
	}
	SET_TX_DESC_PKT_ID(txdesc, 0);
	SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel);
	SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F);
	SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF);
	SET_TX_DESC_DISABLE_FB(txdesc, 0);
	SET_TX_DESC_USE_RATE(txdesc, tcb_desc.use_driver_rate ? 1 : 0);
	if (ieee80211_is_data_qos(fc)) {
		if (mac->rdg_en) {
			RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
				 ("Enable RDG function.\n"));
			SET_TX_DESC_RDG_ENABLE(txdesc, 1);
			SET_TX_DESC_HTC(txdesc, 1);
		}
	}
	/* Rate/MAC id selection depends on whether rate adaptive masking
	 * is driven by the driver (useramask) or fixed offsets. */
	if (rtlpriv->dm.useramask) {
		SET_TX_DESC_RATE_ID(txdesc, tcb_desc.ratr_index);
		SET_TX_DESC_MACID(txdesc, tcb_desc.mac_id);
	} else {
		SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc.ratr_index);
		SET_TX_DESC_MACID(txdesc, tcb_desc.ratr_index);
	}
	/* Non-QoS frames under FW-controlled LPS use HW sequence numbers. */
	if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
	      ppsc->fwctrl_lps) {
		SET_TX_DESC_HWSEQ_EN(txdesc, 1);
		SET_TX_DESC_PKT_ID(txdesc, 8);
		if (!defaultadapter)
			SET_TX_DESC_QOS(txdesc, 1);
	}
	if (ieee80211_has_morefrags(fc))
		SET_TX_DESC_MORE_FRAG(txdesc, 1);
	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
	    is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
		SET_TX_DESC_BMC(txdesc, 1);
	_rtl_fill_usb_tx_desc(txdesc);
	_rtl_tx_desc_checksum(txdesc);
	RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, (" %s ==>\n", __func__));
}
625
/*
 * Build a minimal ("fake") TX descriptor for internally generated
 * packets (e.g. PS-Poll / null frames) of @buffer_len bytes.
 */
void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
			      u32 buffer_len, bool bIsPsPoll)
{
	/* Clear all status */
	memset(pDesc, 0, RTL_TX_HEADER_SIZE);
	SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */
	SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */
	SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
	SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */
	SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
	/* Set NAVUSEHDR to prevent Ps-poll AId field to be changed to error
	 * value by Hw. */
	if (bIsPsPoll) {
		SET_TX_DESC_NAV_USE_HDR(pDesc, 1);
	} else {
		SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */
		/* NOTE(review): original comment said "set bit3", but the
		 * value written is 0x100 (bit 8) — verify the field layout. */
		SET_TX_DESC_PKT_ID(pDesc, 0x100);
	}
	SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
	SET_TX_DESC_OWN(pDesc, 1);
	SET_TX_DESC_TX_RATE(pDesc, DESC92C_RATE1M);
	_rtl_tx_desc_checksum(pDesc);
}
649
/*
 * Fill the TX descriptor for a firmware command (H2C) packet.
 *
 * NOTE(review): FIRST_SEG/LAST_SEG are each set twice, and OFFSET is
 * written twice (RTL_TX_HEADER_SIZE, then literal 0x20) — redundant if
 * RTL_TX_HEADER_SIZE == 0x20, a latent bug otherwise; confirm and
 * deduplicate.
 */
void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
			     u8 *pdesc, bool firstseg,
			     bool lastseg, struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 fw_queue = QSLT_BEACON;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
	__le16 fc = hdr->frame_control;

	memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
	if (firstseg)
		SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
	SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
	SET_TX_DESC_SEQ(pdesc, 0);
	SET_TX_DESC_LINIP(pdesc, 0);
	SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
	SET_TX_DESC_FIRST_SEG(pdesc, 1);
	SET_TX_DESC_LAST_SEG(pdesc, 1);
	SET_TX_DESC_RATE_ID(pdesc, 7);
	SET_TX_DESC_MACID(pdesc, 0);
	SET_TX_DESC_OWN(pdesc, 1);
	SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
	SET_TX_DESC_FIRST_SEG(pdesc, 1);
	SET_TX_DESC_LAST_SEG(pdesc, 1);
	SET_TX_DESC_OFFSET(pdesc, 0x20);
	SET_TX_DESC_USE_RATE(pdesc, 1);
	if (!ieee80211_is_data_qos(fc)) {
		SET_TX_DESC_HWSEQ_EN(pdesc, 1);
		SET_TX_DESC_PKT_ID(pdesc, 8);
	}
	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
		      pdesc, RTL_TX_DESC_SIZE);
}
683
/*
 * Command-packet send hook. Intentionally a stub that reports success;
 * presumably command frames go through the regular TX path on USB —
 * confirm against the callers in the common rtlwifi code.
 */
bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
new file mode 100644
index 000000000000..b396d46edbb7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -0,0 +1,430 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
#ifndef __RTL92CU_TRX_H__
#define __RTL92CU_TRX_H__

/* USB pipe/URB pool sizing for the 92C USB parts. */
#define RTL92C_USB_BULK_IN_NUM			1
#define RTL92C_NUM_RX_URBS			8
#define RTL92C_NUM_TX_URBS			32

/* Max RX transfer buffer in bytes (covers aggregated USB transfers). */
#define RTL92C_SIZE_MAX_RX_BUFFER		15360 /* 8192 */
#define RX_DRV_INFO_SIZE_UNIT			8

/* RX aggregation strategies the device supports. */
enum usb_rx_agg_mode {
	USB_RX_AGG_DISABLE,
	USB_RX_AGG_DMA,
	USB_RX_AGG_USB,
	USB_RX_AGG_DMA_USB
};

/* TX queue selection bitmask values. */
#define TX_SELE_HQ			BIT(0)	/* High Queue */
#define TX_SELE_LQ			BIT(1)	/* Low Queue */
#define TX_SELE_NQ			BIT(2)	/* Normal Queue */

#define RTL_USB_TX_AGG_NUM_DESC		5

/* RX aggregation thresholds/timeouts (page and block granularity). */
#define RTL_USB_RX_AGG_PAGE_NUM		4
#define RTL_USB_RX_AGG_PAGE_TIMEOUT	3

#define RTL_USB_RX_AGG_BLOCK_NUM	5
#define RTL_USB_RX_AGG_BLOCK_TIMEOUT	3
/*======================== rx status =========================================*/

struct rx_drv_info_92c {
	/*
	 * Driver info contains PHY status and other variable-size info.
	 * PHY status content as below.  The layout is hardware-defined
	 * (__packed): do not reorder or resize fields.
	 */

	/* DWORD 0 */
	u8 gain_trsw[4];

	/* DWORD 1 */
	u8 pwdb_all;
	u8 cfosho[4];

	/* DWORD 2 */
	u8 cfotail[4];

	/* DWORD 3 */
	s8 rxevm[2];
	s8 rxsnr[4];

	/* DWORD 4 */
	u8 pdsnr[2];

	/* DWORD 5 */
	u8 csi_current[2];
	u8 csi_target[2];

	/* DWORD 6 */
	u8 sigevm;
	u8 max_ex_pwr;
	u8 ex_intf_flag:1;
	u8 sgi_en:1;
	u8 rxsc:2;
	u8 reserve:4;
} __packed;
96
/* Take a le32 word, convert it to host ordering, right-shift by
 * __shift and mask down to __bits bits.
 */

#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits)		\
	((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) &	\
	BIT_LEN_MASK_32(__bits))

/* Clear a bit field in an le32 word and set __val into it.  The word is
 * converted to host ordering for the read-modify-write and back to le32
 * on store.
 *
 * No trailing semicolon: the expansion must behave as a single
 * expression statement so that
 *	if (cond)
 *		SET_BITS_OFFSET_LE(...);
 *	else
 * compiles (a trailing ';' in the macro body would create an empty
 * statement and break braceless if/else).
 */

#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val)	\
	(*(__le32 *)(__pdesc) =					\
	(cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) &	\
	(~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) |	\
	(((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))))
118
/* macros to read various fields in RX descriptor */
/* Each accessor takes a pointer to the start of the RX descriptor and
 * extracts a bit field from the dword at byte offset +4 per DWORD.
 */

/* DWORD 0 */
#define GET_RX_DESC_PKT_LEN(__rxdesc)		\
	SHIFT_AND_MASK_LE((__rxdesc), 0, 14)
#define GET_RX_DESC_CRC32(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 14, 1)
#define GET_RX_DESC_ICV(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 15, 1)
#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc)	\
	SHIFT_AND_MASK_LE(__rxdesc, 16, 4)
#define GET_RX_DESC_SECURITY(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 20, 3)
#define GET_RX_DESC_QOS(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 23, 1)
#define GET_RX_DESC_SHIFT(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 24, 2)
#define GET_RX_DESC_PHY_STATUS(__rxdesc)	\
	SHIFT_AND_MASK_LE(__rxdesc, 26, 1)
#define GET_RX_DESC_SWDEC(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 27, 1)
#define GET_RX_DESC_LAST_SEG(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 28, 1)
#define GET_RX_DESC_FIRST_SEG(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 29, 1)
#define GET_RX_DESC_EOR(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 30, 1)
#define GET_RX_DESC_OWN(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc, 31, 1)

/* DWORD 1 */
#define GET_RX_DESC_MACID(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5)
#define GET_RX_DESC_TID(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4)
#define GET_RX_DESC_PAGGR(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1)
#define GET_RX_DESC_FAGGR(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1)
#define GET_RX_DESC_A1_FIT(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4)
#define GET_RX_DESC_A2_FIT(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4)
#define GET_RX_DESC_PAM(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1)
#define GET_RX_DESC_PWR(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1)
#define GET_RX_DESC_MORE_DATA(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1)
#define GET_RX_DESC_MORE_FRAG(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1)
#define GET_RX_DESC_TYPE(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2)
#define GET_RX_DESC_MC(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1)
#define GET_RX_DESC_BC(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1)

/* DWORD 2 */
#define GET_RX_DESC_SEQ(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12)
#define GET_RX_DESC_FRAG(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4)
#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc)	\
	SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8)
#define GET_RX_DESC_NEXT_IND(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1)

/* DWORD 3 */
#define GET_RX_DESC_RX_MCS(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6)
#define GET_RX_DESC_RX_HT(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1)
#define GET_RX_DESC_AMSDU(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1)
#define GET_RX_DESC_SPLCP(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1)
#define GET_RX_DESC_BW(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1)
#define GET_RX_DESC_HTC(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1)
#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc)	\
	SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1)
#define GET_RX_DESC_IP_CHK_RPT(__rxdesc)	\
	SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1)
#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc)	\
	SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1)
#define GET_RX_DESC_HWPC_ERR(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1)
#define GET_RX_DESC_HWPC_IND(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1)
#define GET_RX_DESC_IV0(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16)

/* DWORD 4 */
#define GET_RX_DESC_IV1(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32)

/* DWORD 5 */
#define GET_RX_DESC_TSFL(__rxdesc)		\
	SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32)
220
/*======================= tx desc ============================================*/

/* macros to set various fields in TX descriptor */
/* Each setter takes a pointer to the start of the TX descriptor and
 * writes a bit field into the dword at byte offset +4 per Dword.
 */

/* Dword 0 */
#define SET_TX_DESC_PKT_SIZE(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value)
#define SET_TX_DESC_OFFSET(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value)
#define SET_TX_DESC_BMC(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value)
#define SET_TX_DESC_HTC(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value)
#define SET_TX_DESC_LAST_SEG(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value)
#define SET_TX_DESC_FIRST_SEG(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value)
#define SET_TX_DESC_LINIP(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value)
#define SET_TX_DESC_NO_ACM(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value)
#define SET_TX_DESC_GF(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value)
#define SET_TX_DESC_OWN(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value)


/* Dword 1 */
#define SET_TX_DESC_MACID(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value)
#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value)
#define SET_TX_DESC_AGG_BREAK(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value)
#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value)
#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value)
#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value)
#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value)
#define SET_TX_DESC_PIFS(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value)
#define SET_TX_DESC_RATE_ID(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
/* NOTE(review): RA_BRSR_ID writes the same bits (16..19 of Dword 1) as
 * RATE_ID -- presumably two vendor names for one field; confirm before
 * using both in the same descriptor.
 */
#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value)
#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value)
#define SET_TX_DESC_SEC_TYPE(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value)
#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value)

/* Dword 2 */
#define SET_TX_DESC_RTS_RC(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value)
#define SET_TX_DESC_DATA_RC(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value)
#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value)
#define SET_TX_DESC_MORE_FRAG(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value)
#define SET_TX_DESC_RAW(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value)
#define SET_TX_DESC_CCX(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value)
#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value)
#define SET_TX_DESC_ANTSEL_A(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value)
#define SET_TX_DESC_ANTSEL_B(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value)
#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value)
#define SET_TX_DESC_TX_ANTL(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value)
#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value)

/* Dword 3 */
#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value)
#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value)
#define SET_TX_DESC_SEQ(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value)
#define SET_TX_DESC_PKT_ID(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value)

/* Dword 4 */
#define SET_TX_DESC_RTS_RATE(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value)
#define SET_TX_DESC_AP_DCFE(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value)
#define SET_TX_DESC_QOS(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value)
#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value)
#define SET_TX_DESC_USE_RATE(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value)
#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value)
#define SET_TX_DESC_DISABLE_FB(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value)
#define SET_TX_DESC_CTS2SELF(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value)
#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value)
#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value)
#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value)
#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value)
#define SET_TX_DESC_DATA_SC(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value)
#define SET_TX_DESC_DATA_STBC(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value)
#define SET_TX_DESC_DATA_SHORT(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value)
#define SET_TX_DESC_DATA_BW(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value)
#define SET_TX_DESC_RTS_SHORT(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value)
#define SET_TX_DESC_RTS_BW(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value)
#define SET_TX_DESC_RTS_SC(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value)
#define SET_TX_DESC_RTS_STBC(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value)

/* Dword 5 */
#define SET_TX_DESC_TX_RATE(__pdesc, __val)		\
	SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)	\
	SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
#define SET_TX_DESC_CCX_TAG(__pdesc, __val)		\
	SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
	SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value)
#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
	SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value)
#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
	SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value)
#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value)
#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value)

/* Dword 6 */
#define SET_TX_DESC_TXAGC_A(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value)
#define SET_TX_DESC_TXAGC_B(__txdesc, __value)		\
	SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value)
#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value)
#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value)
#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value)
#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value)
#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value)
#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value)

/* Dword 7 */
#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
	SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value)
#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value)
#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value)
#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value)
#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value)	\
	SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value)
403
404
405int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
406u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
407bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
408 struct rtl_stats *stats,
409 struct ieee80211_rx_status *rx_status,
410 u8 *p_desc, struct sk_buff *skb);
411void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb);
412void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *,
413 struct sk_buff_head *);
414void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb);
415int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
416 struct sk_buff *skb);
417struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
418 struct sk_buff_head *);
419void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
420 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
421 struct ieee80211_tx_info *info, struct sk_buff *skb,
422 unsigned int queue_index);
423void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
424 u32 buffer_len, bool bIsPsPoll);
425void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
426 u8 *pdesc, bool b_firstseg,
427 bool b_lastseg, struct sk_buff *skb);
428bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
429
430#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
new file mode 100644
index 000000000000..a4b2613d6a8c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -0,0 +1,1035 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27#include <linux/usb.h>
28#include "core.h"
29#include "wifi.h"
30#include "usb.h"
31#include "base.h"
32#include "ps.h"
33
34#define REALTEK_USB_VENQT_READ 0xC0
35#define REALTEK_USB_VENQT_WRITE 0x40
36#define REALTEK_USB_VENQT_CMD_REQ 0x05
37#define REALTEK_USB_VENQT_CMD_IDX 0x00
38
39#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
40
41static void usbctrl_async_callback(struct urb *urb)
42{
43 if (urb)
44 kfree(urb->context);
45}
46
47static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
48 u16 value, u16 index, void *pdata,
49 u16 len)
50{
51 int rc;
52 unsigned int pipe;
53 u8 reqtype;
54 struct usb_ctrlrequest *dr;
55 struct urb *urb;
56 struct rtl819x_async_write_data {
57 u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
58 struct usb_ctrlrequest dr;
59 } *buf;
60
61 pipe = usb_sndctrlpipe(udev, 0); /* write_out */
62 reqtype = REALTEK_USB_VENQT_WRITE;
63
64 buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
65 if (!buf)
66 return -ENOMEM;
67
68 urb = usb_alloc_urb(0, GFP_ATOMIC);
69 if (!urb) {
70 kfree(buf);
71 return -ENOMEM;
72 }
73
74 dr = &buf->dr;
75
76 dr->bRequestType = reqtype;
77 dr->bRequest = request;
78 dr->wValue = cpu_to_le16(value);
79 dr->wIndex = cpu_to_le16(index);
80 dr->wLength = cpu_to_le16(len);
81 memcpy(buf, pdata, len);
82 usb_fill_control_urb(urb, udev, pipe,
83 (unsigned char *)dr, buf, len,
84 usbctrl_async_callback, buf);
85 rc = usb_submit_urb(urb, GFP_ATOMIC);
86 if (rc < 0)
87 kfree(buf);
88 usb_free_urb(urb);
89 return rc;
90}
91
/*
 * Synchronous vendor-request read.  Blocks in usb_control_msg(); the
 * timeout argument of 0 means "wait indefinitely", so the "TimeOut"
 * message below is really a generic failure report.
 */
static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
					u16 value, u16 index, void *pdata,
					u16 len)
{
	unsigned int pipe;
	int status;
	u8 reqtype;

	pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
	reqtype = REALTEK_USB_VENQT_READ;

	status = usb_control_msg(udev, pipe, request, reqtype, value, index,
				 pdata, len, 0); /* max. timeout */

	if (status < 0)
		/* NOTE(review): dereferences pdata as u32 even on failure
		 * and for len < 4 -- may print uninitialized bytes; confirm
		 * all callers pass at least a 4-byte buffer.
		 */
		printk(KERN_ERR "reg 0x%x, usbctrl_vendorreq TimeOut! "
		       "status:0x%x value=0x%x\n", value, status,
		       *(u32 *)pdata);
	return status;
}
112
/*
 * Read a 1/2/4-byte register value synchronously.
 *
 * A heap buffer is used for the transfer, presumably because
 * usb_control_msg() needs a DMA-able buffer (stack memory is unsafe for
 * USB DMA) -- confirm against the USB core documentation.
 *
 * NOTE(review): on kmalloc failure this returns -ENOMEM cast to u32,
 * which callers cannot distinguish from a register value; the status of
 * the vendor request itself is also ignored.
 */
static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
{
	u8 request;
	u16 wvalue;
	u16 index;
	u32 *data;
	u32 ret;

	data = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	request = REALTEK_USB_VENQT_CMD_REQ;
	index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */

	wvalue = (u16)addr;
	_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
	ret = *data;
	kfree(data);
	return ret;
}
133
/* Width-specific synchronous read wrappers: resolve the usb_device from
 * rtlpriv->io.dev and truncate the result to the requested width.
 */
static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
	struct device *dev = rtlpriv->io.dev;

	return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
}

static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
{
	struct device *dev = rtlpriv->io.dev;

	return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
}

static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
{
	struct device *dev = rtlpriv->io.dev;

	return _usb_read_sync(to_usb_device(dev), addr, 4);
}
154
/*
 * Fire-and-forget register write of @len bytes (1, 2 or 4).
 *
 * @val is copied to a local so &data can be handed to the async path;
 * _usbctrl_vendorreq_async_write() memcpy's it into a heap buffer
 * before submitting, so the stack variable going out of scope is safe.
 */
static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
			     u16 len)
{
	u8 request;
	u16 wvalue;
	u16 index;
	u32 data;

	request = REALTEK_USB_VENQT_CMD_REQ;
	index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
	wvalue = (u16)(addr&0x0000ffff);
	data = val;
	_usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
				       len);
}
170
/* Width-specific asynchronous write wrappers around _usb_write_async(). */
static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
	struct device *dev = rtlpriv->io.dev;

	_usb_write_async(to_usb_device(dev), addr, val, 1);
}

static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
{
	struct device *dev = rtlpriv->io.dev;

	_usb_write_async(to_usb_device(dev), addr, val, 2);
}

static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
{
	struct device *dev = rtlpriv->io.dev;

	_usb_write_async(to_usb_device(dev), addr, val, 4);
}
191
/*
 * Multi-byte register access helper.  Note the asymmetry: reads use the
 * synchronous vendor-request path, but writes go through the *async*
 * path, so a "write" returns before the transfer has completed.
 */
static int _usb_nbytes_read_write(struct usb_device *udev, bool read, u32 addr,
				  u16 len, u8 *pdata)
{
	int status;
	u8 request;
	u16 wvalue;
	u16 index;

	request = REALTEK_USB_VENQT_CMD_REQ;
	index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
	wvalue = (u16)addr;
	if (read)
		status = _usbctrl_vendorreq_sync_read(udev, request, wvalue,
						      index, pdata, len);
	else
		status = _usbctrl_vendorreq_async_write(udev, request, wvalue,
							index, pdata, len);
	return status;
}
211
/* N-byte read (sync) and write (async) entry points, wired into
 * rtlpriv->io by _rtl_usb_io_handler_init().
 */
static int _usb_readN_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len,
			   u8 *pdata)
{
	struct device *dev = rtlpriv->io.dev;

	return _usb_nbytes_read_write(to_usb_device(dev), true, addr, len,
				      pdata);
}

static int _usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, u16 len,
			     u8 *pdata)
{
	struct device *dev = rtlpriv->io.dev;

	return _usb_nbytes_read_write(to_usb_device(dev), false, addr, len,
				      pdata);
}
229
/*
 * Install the USB register-access callbacks into rtlpriv->io so common
 * rtlwifi code can perform MMIO-style accesses over USB vendor
 * requests: reads are synchronous, writes asynchronous.
 */
static void _rtl_usb_io_handler_init(struct device *dev,
				     struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->io.dev = dev;
	mutex_init(&rtlpriv->io.bb_mutex);
	rtlpriv->io.write8_async = _usb_write8_async;
	rtlpriv->io.write16_async = _usb_write16_async;
	rtlpriv->io.write32_async = _usb_write32_async;
	rtlpriv->io.writeN_async = _usb_writeN_async;
	rtlpriv->io.read8_sync = _usb_read8_sync;
	rtlpriv->io.read16_sync = _usb_read16_sync;
	rtlpriv->io.read32_sync = _usb_read32_sync;
	rtlpriv->io.readN_sync = _usb_readN_sync;
}
246
/* Tear down what _rtl_usb_io_handler_init() set up (just the bb mutex). */
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	mutex_destroy(&rtlpriv->io.bb_mutex);
}
253
/*
 * Default TX aggregation handler: performs no aggregation, just hands
 * back the oldest queued skb.
 */
static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw,
						  struct sk_buff_head *list)
{
	return skb_dequeue(list);
}
263
/* True when the device enumerated at USB high speed.  The comparison is
 * already a boolean; the original "? true : false" was redundant.
 */
#define IS_HIGH_SPEED_USB(udev) \
		(USB_SPEED_HIGH == (udev)->speed)
266
/*
 * TX-side setup: derive the max bulk-out transfer size from the
 * negotiated USB speed, validate the endpoint map, hook up the HAL's TX
 * callbacks (falling back to the no-op aggregation handler), and
 * initialize the per-endpoint queues/anchors.
 */
static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
{
	u32 i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev)
						    ? USB_HIGH_SPEED_BULK_SIZE
						    : USB_FULL_SPEED_BULK_SIZE;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("USB Max Bulk-out Size=%d\n",
		 rtlusb->max_bulk_out_size));

	for (i = 0; i < __RTL_TXQ_NUM; i++) {
		u32 ep_num = rtlusb->ep_map.ep_mapping[i];
		if (!ep_num) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
				 ("Invalid endpoint map setting!\n"));
			return -EINVAL;
		}
	}

	rtlusb->usb_tx_post_hdl =
		 rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl;
	rtlusb->usb_tx_cleanup	=
		 rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup;
	/* fall back to the pass-through handler if the HAL supplies none */
	rtlusb->usb_tx_aggregate_hdl =
		 (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl)
		 ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl
		 : &_none_usb_tx_aggregate_hdl;

	init_usb_anchor(&rtlusb->tx_submitted);
	for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
		skb_queue_head_init(&rtlusb->tx_skb_queue[i]);
		init_usb_anchor(&rtlusb->tx_pending[i]);
	}
	return 0;
}
305
/*
 * RX-side setup: copy RX parameters and handlers from the HAL interface
 * config into the rtl_usb state and initialize the RX URB anchor.
 */
static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);

	rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
	rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
	rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
	rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
	rtlusb->usb_rx_segregate_hdl =
		rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;

	printk(KERN_INFO "rtl8192cu: rx_max_size %d, rx_urb_num %d, in_ep %d\n",
		rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep);
	init_usb_anchor(&rtlusb->rx_submitted);
	return 0;
}
324
325static int _rtl_usb_init(struct ieee80211_hw *hw)
326{
327 struct rtl_priv *rtlpriv = rtl_priv(hw);
328 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
329 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
330 int err;
331 u8 epidx;
332 struct usb_interface *usb_intf = rtlusb->intf;
333 u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints;
334
335 rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0;
336 for (epidx = 0; epidx < epnums; epidx++) {
337 struct usb_endpoint_descriptor *pep_desc;
338 pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
339
340 if (usb_endpoint_dir_in(pep_desc))
341 rtlusb->in_ep_nums++;
342 else if (usb_endpoint_dir_out(pep_desc))
343 rtlusb->out_ep_nums++;
344
345 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
346 ("USB EP(0x%02x), MaxPacketSize=%d ,Interval=%d.\n",
347 pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
348 pep_desc->bInterval));
349 }
350 if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num)
351 return -EINVAL ;
352
353 /* usb endpoint mapping */
354 err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
355 rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
356 _rtl_usb_init_tx(hw);
357 _rtl_usb_init_rx(hw);
358 return err;
359}
360
/*
 * Software-state defaults for the USB parts: all power-save features
 * off, IBSS beacon interval, AMPDU density/factor defaults, software
 * ACM, and every interrupt-mask bit enabled.
 */
static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
{
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	rtlhal->hw = hw;
	/* power save disabled across the board on USB */
	ppsc->inactiveps = false;
	ppsc->leisure_ps = false;
	ppsc->fwctrl_lps = false;
	ppsc->reg_fwctrl_lps = 3;
	ppsc->reg_max_lps_awakeintvl = 5;
	ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;

	/* IBSS */
	mac->beacon_interval = 100;

	/* AMPDU */
	mac->min_space_cfg = 0;
	mac->max_mss_density = 0;

	/* set sane AMPDU defaults */
	mac->current_ampdu_density = 7;
	mac->current_ampdu_factor = 3;

	/* QOS */
	rtlusb->acm_method = eAcmWay2_SW;

	/* IRQ */
	/* HIMR - turn all on */
	rtlusb->irq_mask[0] = 0xFFFFFFFF;
	/* HIMR_EX - turn all on */
	rtlusb->irq_mask[1] = 0xFFFFFFFF;
	rtlusb->disableHWSM = true;
	return 0;
}
398
399#define __RADIO_TAP_SIZE_RSV 32
400
401static void _rtl_rx_completed(struct urb *urb);
402
403static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw,
404 struct rtl_usb *rtlusb,
405 struct urb *urb,
406 gfp_t gfp_mask)
407{
408 struct sk_buff *skb;
409 struct rtl_priv *rtlpriv = rtl_priv(hw);
410
411 skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV),
412 gfp_mask);
413 if (!skb) {
414 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
415 ("Failed to __dev_alloc_skb!!\n"))
416 return ERR_PTR(-ENOMEM);
417 }
418
419 /* reserve some space for mac80211's radiotap */
420 skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
421 usb_fill_bulk_urb(urb, rtlusb->udev,
422 usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep),
423 skb->data, min(skb_tailroom(skb),
424 (int)rtlusb->rx_max_size),
425 _rtl_rx_completed, skb);
426
427 _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
428 return skb;
429}
430
431#undef __RADIO_TAP_SIZE_RSV
432
/*
 * Process one frame taken out of an aggregated USB transfer: strip the
 * RX descriptor and driver-info area, and for CRC-good frames attach
 * the mac80211 rx_status and update RX accounting/LED state.  Delivery
 * to mac80211 is done by the caller (_rtl_rx_pre_process()).
 */
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
				    struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc = skb->data;
	struct ieee80211_hdr *hdr;
	bool unicast = false;
	__le16 fc;
	struct ieee80211_rx_status rx_status = {0};
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	skb_pull(skb, RTL_RX_DESC_SIZE);
	rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
	/* drop the per-frame driver info + alignment shift */
	skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	if (!stats.crc) {
		memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

		if (is_broadcast_ether_addr(hdr->addr1)) {
			/*TODO*/;
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			/*TODO*/
		} else {
			unicast = true;
			rtlpriv->stats.rxbytesunicast += skb->len;
		}

		rtl_is_special_data(hw, skb, false);

		if (ieee80211_is_data(fc)) {
			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

			if (unicast)
				rtlpriv->link_info.num_rx_inperiod++;
		}
	}
}
475
476static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
477 struct sk_buff *skb)
478{
479 struct rtl_priv *rtlpriv = rtl_priv(hw);
480 u8 *rxdesc = skb->data;
481 struct ieee80211_hdr *hdr;
482 bool unicast = false;
483 __le16 fc;
484 struct ieee80211_rx_status rx_status = {0};
485 struct rtl_stats stats = {
486 .signal = 0,
487 .noise = -98,
488 .rate = 0,
489 };
490
491 skb_pull(skb, RTL_RX_DESC_SIZE);
492 rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
493 skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
494 hdr = (struct ieee80211_hdr *)(skb->data);
495 fc = hdr->frame_control;
496 if (!stats.crc) {
497 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
498
499 if (is_broadcast_ether_addr(hdr->addr1)) {
500 /*TODO*/;
501 } else if (is_multicast_ether_addr(hdr->addr1)) {
502 /*TODO*/
503 } else {
504 unicast = true;
505 rtlpriv->stats.rxbytesunicast += skb->len;
506 }
507
508 rtl_is_special_data(hw, skb, false);
509
510 if (ieee80211_is_data(fc)) {
511 rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
512
513 if (unicast)
514 rtlpriv->link_info.num_rx_inperiod++;
515 }
516 if (likely(rtl_action_proc(hw, skb, false))) {
517 struct sk_buff *uskb = NULL;
518 u8 *pdata;
519
520 uskb = dev_alloc_skb(skb->len + 128);
521 memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
522 sizeof(rx_status));
523 pdata = (u8 *)skb_put(uskb, skb->len);
524 memcpy(pdata, skb->data, skb->len);
525 dev_kfree_skb_any(skb);
526 ieee80211_rx_irqsafe(hw, uskb);
527 } else {
528 dev_kfree_skb_any(skb);
529 }
530 }
531}
532
533static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
534{
535 struct sk_buff *_skb;
536 struct sk_buff_head rx_queue;
537 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
538
539 skb_queue_head_init(&rx_queue);
540 if (rtlusb->usb_rx_segregate_hdl)
541 rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
542 WARN_ON(skb_queue_empty(&rx_queue));
543 while (!skb_queue_empty(&rx_queue)) {
544 _skb = skb_dequeue(&rx_queue);
545 _rtl_usb_rx_process_agg(hw, skb);
546 ieee80211_rx_irqsafe(hw, skb);
547 }
548}
549
/* Completion handler for bulk IN (RX) URBs.
 *
 * The skb was installed as the URB context by _rtl_prep_rx_urb(); the
 * owning rtl_usb was stashed in the skb CB by _rtl_install_trx_info().
 * On success the received data is processed, a fresh skb is attached
 * to the URB and the URB is resubmitted.  Fatal URB status codes
 * (disconnect family) free the skb and let the URB die.
 */
static void _rtl_rx_completed(struct urb *_urb)
{
	struct sk_buff *skb = (struct sk_buff *)_urb->context;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = 0;

	if (unlikely(IS_USB_STOP(rtlusb)))
		goto free;

	if (likely(0 == _urb->status)) {
		/* If this code were moved to work queue, would CPU
		 * utilization be improved? NOTE: We shall allocate another skb
		 * and reuse the original one.
		 */
		skb_put(skb, _urb->actual_length);

		if (likely(!rtlusb->usb_rx_segregate_hdl)) {
			struct sk_buff *_skb;
			_rtl_usb_rx_process_noagg(hw, skb);
			_skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
			if (IS_ERR(_skb)) {
				err = PTR_ERR(_skb);
				RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
					("Can't allocate skb for bulk IN!\n"));
				/* NOTE(review): returning here silently
				 * retires this URB, permanently shrinking
				 * the RX URB pool — confirm this is the
				 * intended OOM behavior. */
				return;
			}
			skb = _skb;
		} else{
			/* TO DO */
			_rtl_rx_pre_process(hw, skb);
			printk(KERN_ERR "rtlwifi: rx agg not supported\n");
		}
		goto resubmit;
	}

	switch (_urb->status) {
	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		break;
	}

resubmit:
	/* Reset the (fresh or reused) buffer to zero length before
	 * handing it back to the USB core. */
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(_urb, &rtlusb->rx_submitted);
	err = usb_submit_urb(_urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(_urb);
		goto free;
	}
	return;

free:
	dev_kfree_skb_irq(skb);
}
614
615static int _rtl_usb_receive(struct ieee80211_hw *hw)
616{
617 struct urb *urb;
618 struct sk_buff *skb;
619 int err;
620 int i;
621 struct rtl_priv *rtlpriv = rtl_priv(hw);
622 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
623
624 WARN_ON(0 == rtlusb->rx_urb_num);
625 /* 1600 == 1514 + max WLAN header + rtk info */
626 WARN_ON(rtlusb->rx_max_size < 1600);
627
628 for (i = 0; i < rtlusb->rx_urb_num; i++) {
629 err = -ENOMEM;
630 urb = usb_alloc_urb(0, GFP_KERNEL);
631 if (!urb) {
632 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
633 ("Failed to alloc URB!!\n"))
634 goto err_out;
635 }
636
637 skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
638 if (IS_ERR(skb)) {
639 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
640 ("Failed to prep_rx_urb!!\n"))
641 err = PTR_ERR(skb);
642 goto err_out;
643 }
644
645 usb_anchor_urb(urb, &rtlusb->rx_submitted);
646 err = usb_submit_urb(urb, GFP_KERNEL);
647 if (err)
648 goto err_out;
649 usb_free_urb(urb);
650 }
651 return 0;
652
653err_out:
654 usb_kill_anchored_urbs(&rtlusb->rx_submitted);
655 return err;
656}
657
658static int rtl_usb_start(struct ieee80211_hw *hw)
659{
660 int err;
661 struct rtl_priv *rtlpriv = rtl_priv(hw);
662 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
663 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
664
665 err = rtlpriv->cfg->ops->hw_init(hw);
666 rtl_init_rx_config(hw);
667
668 /* Enable software */
669 SET_USB_START(rtlusb);
670 /* should after adapter start and interrupt enable. */
671 set_hal_start(rtlhal);
672
673 /* Start bulk IN */
674 _rtl_usb_receive(hw);
675
676 return err;
677}
678/**
679 *
680 *
681 */
682
683/*======================= tx =========================================*/
/* Stop all USB traffic and reclaim every queued or in-flight buffer.
 *
 * Marks the device stopped first so TX/RX completion handlers bail
 * out, kills the outstanding RX URBs, then flushes each endpoint's TX
 * skb queue — reporting every skb back to mac80211 with the ACK flag
 * set — and finally kills pending and submitted TX URBs.
 */
static void rtl_usb_cleanup(struct ieee80211_hw *hw)
{
	u32 i;
	struct sk_buff *_skb;
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct ieee80211_tx_info *txinfo;

	SET_USB_STOP(rtlusb);

	/* clean up rx stuff. */
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	/* clean up tx stuff */
	for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
		while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
			rtlusb->usb_tx_cleanup(hw, _skb);
			txinfo = IEEE80211_SKB_CB(_skb);
			ieee80211_tx_info_clear_status(txinfo);
			/* Pretend the frame was ACKed when reporting
			 * its status back to mac80211. */
			txinfo->flags |= IEEE80211_TX_STAT_ACK;
			ieee80211_tx_status_irqsafe(hw, _skb);
		}
		usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
	}
	usb_kill_anchored_urbs(&rtlusb->tx_submitted);
}
709
710/**
711 *
712 * We may add some struct into struct rtl_usb later. Do deinit here.
713 *
714 */
/* Tear down driver-private USB state.  Currently delegates entirely
 * to rtl_usb_cleanup(); kept as a separate hook so future members of
 * struct rtl_usb can be released here. */
static void rtl_usb_deinit(struct ieee80211_hw *hw)
{
	rtl_usb_cleanup(hw);
}
719
/* intf_ops adapter_stop: mark the HAL and USB layer stopped, reclaim
 * all buffers and power the hardware down. */
static void rtl_usb_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	/* should after adapter start and interrupt enable. */
	set_hal_stop(rtlhal);
	/* Mark the software layer stopped: completion handlers check
	 * IS_USB_STOP() and bail out early. */
	SET_USB_STOP(rtlusb);
	rtl_usb_deinit(hw);
	rtlpriv->cfg->ops->hw_disable(hw);
}
733
734static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
735{
736 int err;
737 struct rtl_priv *rtlpriv = rtl_priv(hw);
738 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
739
740 usb_anchor_urb(_urb, &rtlusb->tx_submitted);
741 err = usb_submit_urb(_urb, GFP_ATOMIC);
742 if (err < 0) {
743 struct sk_buff *skb;
744
745 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
746 ("Failed to submit urb.\n"));
747 usb_unanchor_urb(_urb);
748 skb = (struct sk_buff *)_urb->context;
749 kfree_skb(skb);
750 }
751 usb_free_urb(_urb);
752}
753
754static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
755 struct sk_buff *skb)
756{
757 struct rtl_priv *rtlpriv = rtl_priv(hw);
758 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
759 struct ieee80211_tx_info *txinfo;
760
761 rtlusb->usb_tx_post_hdl(hw, urb, skb);
762 skb_pull(skb, RTL_TX_HEADER_SIZE);
763 txinfo = IEEE80211_SKB_CB(skb);
764 ieee80211_tx_info_clear_status(txinfo);
765 txinfo->flags |= IEEE80211_TX_STAT_ACK;
766
767 if (urb->status) {
768 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
769 ("Urb has error status 0x%X\n", urb->status));
770 goto out;
771 }
772 /* TODO: statistics */
773out:
774 ieee80211_tx_status_irqsafe(hw, skb);
775 return urb->status;
776}
777
/* Completion handler for bulk OUT (TX) URBs.
 *
 * Recovers the skb from the URB context and the owning device from
 * the skb CB (installed by _rtl_install_trx_info()), then runs TX
 * post-processing and status reporting.
 *
 * NOTE(review): when IS_USB_STOP() is set this returns without
 * freeing the skb — confirm the buffer is reclaimed elsewhere on the
 * stop path, otherwise it leaks.
 */
static void _rtl_tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
	struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
	int err;

	if (unlikely(IS_USB_STOP(rtlusb)))
		return;
	err = _usb_tx_post(hw, urb, skb);
	if (err) {
		/* Ignore error and keep issuing other urbs */
		return;
	}
}
794
795static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
796 struct sk_buff *skb, u32 ep_num)
797{
798 struct rtl_priv *rtlpriv = rtl_priv(hw);
799 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
800 struct urb *_urb;
801
802 WARN_ON(NULL == skb);
803 _urb = usb_alloc_urb(0, GFP_ATOMIC);
804 if (!_urb) {
805 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
806 ("Can't allocate URB for bulk out!\n"));
807 kfree_skb(skb);
808 return NULL;
809 }
810 _rtl_install_trx_info(rtlusb, skb, ep_num);
811 usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
812 ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
813 _urb->transfer_flags |= URB_ZERO_PACKET;
814 return _urb;
815}
816
817static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
818 enum rtl_txq qnum)
819{
820 struct rtl_priv *rtlpriv = rtl_priv(hw);
821 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
822 u32 ep_num;
823 struct urb *_urb = NULL;
824 struct sk_buff *_skb = NULL;
825 struct sk_buff_head *skb_list;
826 struct usb_anchor *urb_list;
827
828 WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
829 if (unlikely(IS_USB_STOP(rtlusb))) {
830 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
831 ("USB device is stopping...\n"));
832 kfree_skb(skb);
833 return;
834 }
835 ep_num = rtlusb->ep_map.ep_mapping[qnum];
836 skb_list = &rtlusb->tx_skb_queue[ep_num];
837 _skb = skb;
838 _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
839 if (unlikely(!_urb)) {
840 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
841 ("Can't allocate urb. Drop skb!\n"));
842 return;
843 }
844 urb_list = &rtlusb->tx_pending[ep_num];
845 _rtl_submit_tx_urb(hw, _urb);
846}
847
/* Per-frame TX bookkeeping before the frame enters the USB pipeline:
 * management/action frame hooks, byte counters, QoS sequence-number
 * tracking and HW TX descriptor fill.
 *
 * NOTE(review): pdesc is passed to fill_tx_desc as NULL — presumably
 * the USB HAL builds the descriptor inside the skb headroom rather
 * than in a DMA ring; confirm against the HAL implementation.
 */
static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
			u16 hw_queue)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_tx_desc *pdesc = NULL;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
	__le16 fc = hdr->frame_control;
	u8 *pda_addr = hdr->addr1;	/* destination address */
	/* ssn */
	u8 *qc = NULL;
	u8 tid = 0;
	u16 seq_number = 0;

	if (ieee80211_is_mgmt(fc))
		rtl_tx_mgmt_proc(hw, skb);
	rtl_action_proc(hw, skb, true);
	/* Byte counters keyed on the destination address class. */
	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		/* Next sequence number for this TID, kept pre-shifted
		 * into seq_ctrl position (low 4 bits are the fragment
		 * field). */
		seq_number = (le16_to_cpu(hdr->seq_ctrl) &
			     IEEE80211_SCTL_SEQ) >> 4;
		seq_number += 1;
		seq_number <<= 4;
	}
	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
					hw_queue);
	/* Only advance the per-TID counter on the final fragment. */
	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		if (qc)
			mac->tids[tid].seq_number = seq_number;
	}
	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
}
889
890static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
891{
892 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
893 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
894 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
895 __le16 fc = hdr->frame_control;
896 u16 hw_queue;
897
898 if (unlikely(is_hal_stop(rtlhal)))
899 goto err_free;
900 hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
901 _rtl_usb_tx_preprocess(hw, skb, hw_queue);
902 _rtl_usb_transmit(hw, skb, hw_queue);
903 return NETDEV_TX_OK;
904
905err_free:
906 dev_kfree_skb_any(skb);
907 return NETDEV_TX_OK;
908}
909
/* intf_ops waitq_insert: the USB path never defers frames to a wait
 * queue, so this always declines the insertion. */
static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
					struct sk_buff *skb)
{
	return false;
}
915
/* USB implementations of the rtlwifi interface ops; installed as
 * rtlpriv->intf_ops by rtl_usb_probe(). */
static struct rtl_intf_ops rtl_usb_ops = {
	.adapter_start = rtl_usb_start,
	.adapter_stop = rtl_usb_stop,
	.adapter_tx = rtl_usb_tx,
	.waitq_insert = rtl_usb_tx_chk_waitq_insert,
};
922
923int __devinit rtl_usb_probe(struct usb_interface *intf,
924 const struct usb_device_id *id)
925{
926 int err;
927 struct ieee80211_hw *hw = NULL;
928 struct rtl_priv *rtlpriv = NULL;
929 struct usb_device *udev;
930 struct rtl_usb_priv *usb_priv;
931
932 hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
933 sizeof(struct rtl_usb_priv), &rtl_ops);
934 if (!hw) {
935 RT_ASSERT(false, ("%s : ieee80211 alloc failed\n", __func__));
936 return -ENOMEM;
937 }
938 rtlpriv = hw->priv;
939 SET_IEEE80211_DEV(hw, &intf->dev);
940 udev = interface_to_usbdev(intf);
941 usb_get_dev(udev);
942 usb_priv = rtl_usbpriv(hw);
943 memset(usb_priv, 0, sizeof(*usb_priv));
944 usb_priv->dev.intf = intf;
945 usb_priv->dev.udev = udev;
946 usb_set_intfdata(intf, hw);
947 /* init cfg & intf_ops */
948 rtlpriv->rtlhal.interface = INTF_USB;
949 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
950 rtlpriv->intf_ops = &rtl_usb_ops;
951 rtl_dbgp_flag_init(hw);
952 /* Init IO handler */
953 _rtl_usb_io_handler_init(&udev->dev, hw);
954 rtlpriv->cfg->ops->read_chip_version(hw);
955 /*like read eeprom and so on */
956 rtlpriv->cfg->ops->read_eeprom_info(hw);
957 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
958 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
959 ("Can't init_sw_vars.\n"));
960 goto error_out;
961 }
962 rtlpriv->cfg->ops->init_sw_leds(hw);
963 err = _rtl_usb_init(hw);
964 err = _rtl_usb_init_sw(hw);
965 /* Init mac80211 sw */
966 err = rtl_init_core(hw);
967 if (err) {
968 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
969 ("Can't allocate sw for mac80211.\n"));
970 goto error_out;
971 }
972
973 /*init rfkill */
974 /* rtl_init_rfkill(hw); */
975
976 err = ieee80211_register_hw(hw);
977 if (err) {
978 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
979 ("Can't register mac80211 hw.\n"));
980 goto error_out;
981 } else {
982 rtlpriv->mac80211.mac80211_registered = 1;
983 }
984 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
985 return 0;
986error_out:
987 rtl_deinit_core(hw);
988 _rtl_usb_io_handler_release(hw);
989 ieee80211_free_hw(hw);
990 usb_put_dev(udev);
991 return -ENODEV;
992}
993EXPORT_SYMBOL(rtl_usb_probe);
994
/* USB disconnect: unwind everything rtl_usb_probe() set up.
 *
 * If mac80211 registration succeeded, ieee80211_unregister_hw() (whose
 * ops_stop path stops the adapter) is used; otherwise the deferred
 * work and adapter are stopped directly.  Then USB state, core state,
 * LEDs, software vars and the IO handler are torn down, and the
 * references taken on the usb_device and the intfdata are released.
 */
void rtl_usb_disconnect(struct usb_interface *intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(intf);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	if (unlikely(!rtlpriv))
		return;
	/*ieee80211_unregister_hw will call ops_stop */
	if (rtlmac->mac80211_registered == 1) {
		ieee80211_unregister_hw(hw);
		rtlmac->mac80211_registered = 0;
	} else {
		rtl_deinit_deferred_work(hw);
		rtlpriv->intf_ops->adapter_stop(hw);
	}
	/*deinit rfkill */
	/* rtl_deinit_rfkill(hw); */
	rtl_usb_deinit(hw);
	rtl_deinit_core(hw);
	rtlpriv->cfg->ops->deinit_sw_leds(hw);
	rtlpriv->cfg->ops->deinit_sw_vars(hw);
	_rtl_usb_io_handler_release(hw);
	usb_put_dev(rtlusb->udev);
	usb_set_intfdata(intf, NULL);
	ieee80211_free_hw(hw);
}
1023EXPORT_SYMBOL(rtl_usb_disconnect);
1024
/* PM suspend hook — intentionally a no-op for now (see the "Add
 * suspend/resume later" note in usb.h). */
int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message)
{
	return 0;
}
1029EXPORT_SYMBOL(rtl_usb_suspend);
1030
/* PM resume hook — intentionally a no-op for now; pairs with
 * rtl_usb_suspend(). */
int rtl_usb_resume(struct usb_interface *pusb_intf)
{
	return 0;
}
1035EXPORT_SYMBOL(rtl_usb_resume);
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
new file mode 100644
index 000000000000..abadfe918d30
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -0,0 +1,164 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27
28#ifndef __RTL_USB_H__
29#define __RTL_USB_H__
30
31#include <linux/usb.h>
32#include <linux/skbuff.h>
33
/* Shorthand for usb_device_id table entries: match on VID/PID and
 * stash the per-chip rtl_hal_cfg pointer in driver_info, which
 * rtl_usb_probe() reads back as rtlpriv->cfg. */
#define RTL_USB_DEVICE(vend, prod, cfg) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
	.idVendor = (vend), \
	.idProduct = (prod), \
	.driver_info = (kernel_ulong_t)&(cfg)

/* Bulk endpoint max packet sizes for high-/full-speed USB. */
#define USB_HIGH_SPEED_BULK_SIZE	512
#define USB_FULL_SPEED_BULK_SIZE	64


#define RTL_USB_MAX_TXQ_NUM		4	/* max tx queue */
#define RTL_USB_MAX_EP_NUM		6	/* max ep number */
#define RTL_USB_MAX_TX_URBS_NUM		8
47
/* Logical TX queues; translated to endpoint numbers via rtl_ep_map. */
enum rtl_txq {
	/* These definitions shall be consistent with value
	 * returned by skb_get_queue_mapping
	 *------------------------------------*/
	RTL_TXQ_BK,
	RTL_TXQ_BE,
	RTL_TXQ_VI,
	RTL_TXQ_VO,
	/*------------------------------------*/
	RTL_TXQ_BCN,	/* beacon */
	RTL_TXQ_MGT,	/* management */
	RTL_TXQ_HI,	/* high priority */

	/* Must be last */
	__RTL_TXQ_NUM,
};
64
/* Per-device table mapping each enum rtl_txq to a USB endpoint. */
struct rtl_ep_map {
	u32 ep_mapping[__RTL_TXQ_NUM];
};
68
/* Per-skb in-flight TRX bookkeeping — apparently mirrors the two
 * values _rtl_install_trx_info() packs into the skb's CB area;
 * NOTE(review): confirm whether this struct is actually used. */
struct _trx_info {
	struct rtl_usb *rtlusb;
	u32 ep_num;
};
73
/* Stash the owning device and endpoint number in the skb's mac80211
 * CB (rate_driver_data) so URB completion handlers can recover them. */
static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb,
					 struct sk_buff *skb,
					 u32 ep_num)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	info->rate_driver_data[0] = rtlusb;
	/* ep_num is smuggled through a pointer-sized integer cast. */
	info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num;
}
82
83
84/* Add suspend/resume later */
/* Software run state of the USB device; see IS_USB_STOP/START and
 * SET_USB_STOP/START below. */
enum rtl_usb_state {
	USB_STATE_STOP	= 0,
	USB_STATE_START	= 1,
};
89
/* Run-state helpers: completion handlers test IS_USB_STOP() so
 * in-flight URBs bail out once the device is being torn down. */
#define IS_USB_STOP(rtlusb_ptr) (USB_STATE_STOP == (rtlusb_ptr)->state)
#define IS_USB_START(rtlusb_ptr) (USB_STATE_START == (rtlusb_ptr)->state)
#define SET_USB_STOP(rtlusb_ptr) \
	do {							\
		(rtlusb_ptr)->state = USB_STATE_STOP;		\
	} while (0)

#define SET_USB_START(rtlusb_ptr)				\
	do { 							\
		(rtlusb_ptr)->state = USB_STATE_START;		\
	} while (0)
101
/* Per-device USB state, embedded in struct rtl_usb_priv. */
struct rtl_usb {
	struct usb_device *udev;
	struct usb_interface *intf;
	enum rtl_usb_state state;	/* gates TX/RX completion paths */

	/* Bcn control register setting */
	u32 reg_bcn_ctrl_val;
	/* for 88/92cu card disable */
	u8  disableHWSM;
	/*QOS & EDCA */
	enum acm_method acm_method;
	/* irq  . HIMR,HIMR_EX */
	u32 irq_mask[2];
	bool irq_enabled;

	/* Maps (frame_control, mac80211 queue) to a HW queue index. */
	u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);

	/* Tx */
	u8 out_ep_nums ;
	u8 out_queue_sel;
	struct rtl_ep_map ep_map;

	u32 max_bulk_out_size;
	u32 tx_submitted_urbs;
	struct sk_buff_head tx_skb_queue[RTL_USB_MAX_EP_NUM];

	struct usb_anchor tx_pending[RTL_USB_MAX_EP_NUM];
	struct usb_anchor tx_submitted;

	/* Optional per-HAL TX hooks (aggregation, post-processing,
	 * cleanup of queued skbs). */
	struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
						struct sk_buff_head *);
	int (*usb_tx_post_hdl)(struct ieee80211_hw *,
			       struct urb *, struct sk_buff *);
	void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);

	/* Rx */
	u8 in_ep_nums ;
	u32 in_ep;		/* Bulk IN endpoint number */
	u32 rx_max_size;	/* Bulk IN max buffer size */
	u32 rx_urb_num;		/* How many Bulk INs are submitted to host. */
	struct usb_anchor	rx_submitted;
	/* Optional hooks for aggregated RX (splitting one bulk
	 * transfer into multiple frames). */
	void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
				     struct sk_buff_head *);
	void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
};
147
/* Bus-private area hung off rtl_priv for USB adapters; accessed via
 * the rtl_usbpriv()/rtl_usbdev() helpers below. */
struct rtl_usb_priv {
	struct rtl_usb dev;
	struct rtl_led_ctl ledctl;
};
152
153#define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
154#define rtl_usbdev(usbpriv) (&((usbpriv)->dev))
155
156
157
158int __devinit rtl_usb_probe(struct usb_interface *intf,
159 const struct usb_device_id *id);
160void rtl_usb_disconnect(struct usb_interface *intf);
161int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
162int rtl_usb_resume(struct usb_interface *pusb_intf);
163
164#endif
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index d44d79613d2d..01226f8e70f9 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -34,6 +34,8 @@
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/version.h> 35#include <linux/version.h>
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <linux/vmalloc.h>
38#include <linux/usb.h>
37#include <net/mac80211.h> 39#include <net/mac80211.h>
38#include "debug.h" 40#include "debug.h"
39 41
@@ -82,6 +84,19 @@
82#define MAC80211_3ADDR_LEN 24 84#define MAC80211_3ADDR_LEN 24
83#define MAC80211_4ADDR_LEN 30 85#define MAC80211_4ADDR_LEN 30
84 86
87#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
88#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
89#define MAX_PG_GROUP 13
90#define CHANNEL_GROUP_MAX_2G 3
91#define CHANNEL_GROUP_IDX_5GL 3
92#define CHANNEL_GROUP_IDX_5GM 6
93#define CHANNEL_GROUP_IDX_5GH 9
94#define CHANNEL_GROUP_MAX_5G 9
95#define CHANNEL_MAX_NUMBER_2G 14
96#define AVG_THERMAL_NUM 8
97
98/* for early mode */
99#define EM_HDR_LEN 8
85enum intf_type { 100enum intf_type {
86 INTF_PCI = 0, 101 INTF_PCI = 0,
87 INTF_USB = 1, 102 INTF_USB = 1,
@@ -113,11 +128,38 @@ enum hardware_type {
113 HARDWARE_TYPE_RTL8192CU, 128 HARDWARE_TYPE_RTL8192CU,
114 HARDWARE_TYPE_RTL8192DE, 129 HARDWARE_TYPE_RTL8192DE,
115 HARDWARE_TYPE_RTL8192DU, 130 HARDWARE_TYPE_RTL8192DU,
131 HARDWARE_TYPE_RTL8723E,
132 HARDWARE_TYPE_RTL8723U,
116 133
117 /*keep it last*/ 134 /* keep it last */
118 HARDWARE_TYPE_NUM 135 HARDWARE_TYPE_NUM
119}; 136};
120 137
138#define IS_HARDWARE_TYPE_8192SU(rtlhal) \
139 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU)
140#define IS_HARDWARE_TYPE_8192SE(rtlhal) \
141 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
142#define IS_HARDWARE_TYPE_8192CE(rtlhal) \
143 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
144#define IS_HARDWARE_TYPE_8192CU(rtlhal) \
145 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU)
146#define IS_HARDWARE_TYPE_8192DE(rtlhal) \
147 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
148#define IS_HARDWARE_TYPE_8192DU(rtlhal) \
149 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU)
150#define IS_HARDWARE_TYPE_8723E(rtlhal) \
151 (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E)
152#define IS_HARDWARE_TYPE_8723U(rtlhal) \
153 (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
154#define IS_HARDWARE_TYPE_8192S(rtlhal) \
155(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal))
156#define IS_HARDWARE_TYPE_8192C(rtlhal) \
157(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal))
158#define IS_HARDWARE_TYPE_8192D(rtlhal) \
159(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
160#define IS_HARDWARE_TYPE_8723(rtlhal) \
161(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
162
121enum scan_operation_backup_opt { 163enum scan_operation_backup_opt {
122 SCAN_OPT_BACKUP = 0, 164 SCAN_OPT_BACKUP = 0,
123 SCAN_OPT_RESTORE, 165 SCAN_OPT_RESTORE,
@@ -315,6 +357,7 @@ enum rf_type {
315 RF_1T1R = 0, 357 RF_1T1R = 0,
316 RF_1T2R = 1, 358 RF_1T2R = 1,
317 RF_2T2R = 2, 359 RF_2T2R = 2,
360 RF_2T2R_GREEN = 3,
318}; 361};
319 362
320enum ht_channel_width { 363enum ht_channel_width {
@@ -359,6 +402,8 @@ enum rtl_var_map {
359 EFUSE_LOADER_CLK_EN, 402 EFUSE_LOADER_CLK_EN,
360 EFUSE_ANA8M, 403 EFUSE_ANA8M,
361 EFUSE_HWSET_MAX_SIZE, 404 EFUSE_HWSET_MAX_SIZE,
405 EFUSE_MAX_SECTION_MAP,
406 EFUSE_REAL_CONTENT_SIZE,
362 407
363 /*CAM map */ 408 /*CAM map */
364 RWCAM, 409 RWCAM,
@@ -397,6 +442,7 @@ enum rtl_var_map {
397 RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */ 442 RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
398 RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrup */ 443 RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrup */
399 RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */ 444 RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
445 RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/
400 RTL_IMR_TBDOK, /*Transmit Beacon OK interrup */ 446 RTL_IMR_TBDOK, /*Transmit Beacon OK interrup */
401 RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */ 447 RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
402 RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */ 448 RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */
@@ -405,7 +451,8 @@ enum rtl_var_map {
405 RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */ 451 RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
406 RTL_IMR_VODOK, /*AC_VO DMA Interrupt */ 452 RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
407 RTL_IMR_ROK, /*Receive DMA OK Interrupt */ 453 RTL_IMR_ROK, /*Receive DMA OK Interrupt */
408 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt|RTL_IMR_TBDOK|RTL_IMR_TBDER)*/ 454 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
455 * RTL_IMR_TBDER) */
409 456
410 /*CCK Rates, TxHT = 0 */ 457 /*CCK Rates, TxHT = 0 */
411 RTL_RC_CCK_RATE1M, 458 RTL_RC_CCK_RATE1M,
@@ -481,6 +528,19 @@ enum acm_method {
481 eAcmWay2_SW = 2, 528 eAcmWay2_SW = 2,
482}; 529};
483 530
531enum macphy_mode {
532 SINGLEMAC_SINGLEPHY = 0,
533 DUALMAC_DUALPHY,
534 DUALMAC_SINGLEPHY,
535};
536
537enum band_type {
538 BAND_ON_2_4G = 0,
539 BAND_ON_5G,
540 BAND_ON_BOTH,
541 BANDMAX
542};
543
484/*aci/aifsn Field. 544/*aci/aifsn Field.
485Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/ 545Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
486union aci_aifsn { 546union aci_aifsn {
@@ -505,6 +565,17 @@ enum wireless_mode {
505 WIRELESS_MODE_N_5G = 0x20 565 WIRELESS_MODE_N_5G = 0x20
506}; 566};
507 567
568#define IS_WIRELESS_MODE_A(wirelessmode) \
569 (wirelessmode == WIRELESS_MODE_A)
570#define IS_WIRELESS_MODE_B(wirelessmode) \
571 (wirelessmode == WIRELESS_MODE_B)
572#define IS_WIRELESS_MODE_G(wirelessmode) \
573 (wirelessmode == WIRELESS_MODE_G)
574#define IS_WIRELESS_MODE_N_24G(wirelessmode) \
575 (wirelessmode == WIRELESS_MODE_N_24G)
576#define IS_WIRELESS_MODE_N_5G(wirelessmode) \
577 (wirelessmode == WIRELESS_MODE_N_5G)
578
508enum ratr_table_mode { 579enum ratr_table_mode {
509 RATR_INX_WIRELESS_NGB = 0, 580 RATR_INX_WIRELESS_NGB = 0,
510 RATR_INX_WIRELESS_NG = 1, 581 RATR_INX_WIRELESS_NG = 1,
@@ -574,11 +645,11 @@ struct rtl_probe_rsp {
574struct rtl_led { 645struct rtl_led {
575 void *hw; 646 void *hw;
576 enum rtl_led_pin ledpin; 647 enum rtl_led_pin ledpin;
577 bool b_ledon; 648 bool ledon;
578}; 649};
579 650
580struct rtl_led_ctl { 651struct rtl_led_ctl {
581 bool bled_opendrain; 652 bool led_opendrain;
582 struct rtl_led sw_led0; 653 struct rtl_led sw_led0;
583 struct rtl_led sw_led1; 654 struct rtl_led sw_led1;
584}; 655};
@@ -603,6 +674,8 @@ struct false_alarm_statistics {
603 u32 cnt_rate_illegal; 674 u32 cnt_rate_illegal;
604 u32 cnt_crc8_fail; 675 u32 cnt_crc8_fail;
605 u32 cnt_mcs_fail; 676 u32 cnt_mcs_fail;
677 u32 cnt_fast_fsync_fail;
678 u32 cnt_sb_search_fail;
606 u32 cnt_ofdm_fail; 679 u32 cnt_ofdm_fail;
607 u32 cnt_cck_fail; 680 u32 cnt_cck_fail;
608 u32 cnt_all; 681 u32 cnt_all;
@@ -690,6 +763,32 @@ struct rtl_rfkill {
690 bool rfkill_state; /*0 is off, 1 is on */ 763 bool rfkill_state; /*0 is off, 1 is on */
691}; 764};
692 765
766#define IQK_MATRIX_REG_NUM 8
767#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21)
768struct iqk_matrix_regs {
769 bool b_iqk_done;
770 long value[1][IQK_MATRIX_REG_NUM];
771};
772
773struct phy_parameters {
774 u16 length;
775 u32 *pdata;
776};
777
778enum hw_param_tab_index {
779 PHY_REG_2T,
780 PHY_REG_1T,
781 PHY_REG_PG,
782 RADIOA_2T,
783 RADIOB_2T,
784 RADIOA_1T,
785 RADIOB_1T,
786 MAC_REG,
787 AGCTAB_2T,
788 AGCTAB_1T,
789 MAX_TAB
790};
791
693struct rtl_phy { 792struct rtl_phy {
694 struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */ 793 struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */
695 struct init_gain initgain_backup; 794 struct init_gain initgain_backup;
@@ -705,8 +804,9 @@ struct rtl_phy {
705 u8 current_channel; 804 u8 current_channel;
706 u8 h2c_box_num; 805 u8 h2c_box_num;
707 u8 set_io_inprogress; 806 u8 set_io_inprogress;
807 u8 lck_inprogress;
708 808
709 /*record for power tracking*/ 809 /* record for power tracking */
710 s32 reg_e94; 810 s32 reg_e94;
711 s32 reg_e9c; 811 s32 reg_e9c;
712 s32 reg_ea4; 812 s32 reg_ea4;
@@ -723,26 +823,32 @@ struct rtl_phy {
723 u32 iqk_mac_backup[IQK_MAC_REG_NUM]; 823 u32 iqk_mac_backup[IQK_MAC_REG_NUM];
724 u32 iqk_bb_backup[10]; 824 u32 iqk_bb_backup[10];
725 825
726 bool b_rfpi_enable; 826 /* Dual mac */
827 bool need_iqk;
828 struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
829
830 bool rfpi_enable;
727 831
728 u8 pwrgroup_cnt; 832 u8 pwrgroup_cnt;
729 u8 bcck_high_power; 833 u8 cck_high_power;
730 /* 3 groups of pwr diff by rates*/ 834 /* MAX_PG_GROUP groups of pwr diff by rates */
731 u32 mcs_txpwrlevel_origoffset[4][16]; 835 u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
732 u8 default_initialgain[4]; 836 u8 default_initialgain[4];
733 837
734 /*the current Tx power level*/ 838 /* the current Tx power level */
735 u8 cur_cck_txpwridx; 839 u8 cur_cck_txpwridx;
736 u8 cur_ofdm24g_txpwridx; 840 u8 cur_ofdm24g_txpwridx;
737 841
738 u32 rfreg_chnlval[2]; 842 u32 rfreg_chnlval[2];
739 bool b_apk_done; 843 bool apk_done;
844 u32 reg_rf3c[2]; /* pathA / pathB */
740 845
741 /*fsync*/
742 u8 framesync; 846 u8 framesync;
743 u32 framesync_c34; 847 u32 framesync_c34;
744 848
745 u8 num_total_rfpath; 849 u8 num_total_rfpath;
850 struct phy_parameters hwparam_tables[MAX_TAB];
851 u16 rf_pathmap;
746}; 852};
747 853
748#define MAX_TID_COUNT 9 854#define MAX_TID_COUNT 9
@@ -768,6 +874,7 @@ struct rtl_tid_data {
768struct rtl_priv; 874struct rtl_priv;
769struct rtl_io { 875struct rtl_io {
770 struct device *dev; 876 struct device *dev;
877 struct mutex bb_mutex;
771 878
772 /*PCI MEM map */ 879 /*PCI MEM map */
773 unsigned long pci_mem_end; /*shared mem end */ 880 unsigned long pci_mem_end; /*shared mem end */
@@ -779,11 +886,14 @@ struct rtl_io {
779 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); 886 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
780 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val); 887 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
781 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val); 888 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
782 889 int (*writeN_async) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
783 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); 890 u8 *pdata);
784 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); 891
785 u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr); 892 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
786 893 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
894 u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
895 int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
896 u8 *pdata);
787}; 897};
788 898
789struct rtl_mac { 899struct rtl_mac {
@@ -815,16 +925,24 @@ struct rtl_mac {
815 bool act_scanning; 925 bool act_scanning;
816 u8 cnt_after_linked; 926 u8 cnt_after_linked;
817 927
818 /*RDG*/ bool rdg_en; 928 /* early mode */
929 /* skb wait queue */
930 struct sk_buff_head skb_waitq[MAX_TID_COUNT];
931 u8 earlymode_threshold;
932
933 /*RDG*/
934 bool rdg_en;
819 935
820 /*AP*/ u8 bssid[6]; 936 /*AP*/
821 u8 mcs[16]; /*16 bytes mcs for HT rates.*/ 937 u8 bssid[6];
822 u32 basic_rates; /*b/g rates*/ 938 u32 vendor;
939 u8 mcs[16]; /* 16 bytes mcs for HT rates. */
940 u32 basic_rates; /* b/g rates */
823 u8 ht_enable; 941 u8 ht_enable;
824 u8 sgi_40; 942 u8 sgi_40;
825 u8 sgi_20; 943 u8 sgi_20;
826 u8 bw_40; 944 u8 bw_40;
827 u8 mode; /*wireless mode*/ 945 u8 mode; /* wireless mode */
828 u8 slot_time; 946 u8 slot_time;
829 u8 short_preamble; 947 u8 short_preamble;
830 u8 use_cts_protect; 948 u8 use_cts_protect;
@@ -835,9 +953,11 @@ struct rtl_mac {
835 u8 retry_long; 953 u8 retry_long;
836 u16 assoc_id; 954 u16 assoc_id;
837 955
838 /*IBSS*/ int beacon_interval; 956 /*IBSS*/
957 int beacon_interval;
839 958
840 /*AMPDU*/ u8 min_space_cfg; /*For Min spacing configurations */ 959 /*AMPDU*/
960 u8 min_space_cfg; /*For Min spacing configurations */
841 u8 max_mss_density; 961 u8 max_mss_density;
842 u8 current_ampdu_factor; 962 u8 current_ampdu_factor;
843 u8 current_ampdu_density; 963 u8 current_ampdu_density;
@@ -852,17 +972,54 @@ struct rtl_hal {
852 972
853 enum intf_type interface; 973 enum intf_type interface;
854 u16 hw_type; /*92c or 92d or 92s and so on */ 974 u16 hw_type; /*92c or 92d or 92s and so on */
975 u8 ic_class;
855 u8 oem_id; 976 u8 oem_id;
856 u8 version; /*version of chip */ 977 u32 version; /*version of chip */
857 u8 state; /*stop 0, start 1 */ 978 u8 state; /*stop 0, start 1 */
858 979
859 /*firmware */ 980 /*firmware */
981 u32 fwsize;
860 u8 *pfirmware; 982 u8 *pfirmware;
861 bool b_h2c_setinprogress; 983 u16 fw_version;
984 u16 fw_subversion;
985 bool h2c_setinprogress;
862 u8 last_hmeboxnum; 986 u8 last_hmeboxnum;
863 bool bfw_ready; 987 bool fw_ready;
864 /*Reserve page start offset except beacon in TxQ. */ 988 /*Reserve page start offset except beacon in TxQ. */
865 u8 fw_rsvdpage_startoffset; 989 u8 fw_rsvdpage_startoffset;
990 u8 h2c_txcmd_seq;
991
992 /* FW Cmd IO related */
993 u16 fwcmd_iomap;
994 u32 fwcmd_ioparam;
995 bool set_fwcmd_inprogress;
996 u8 current_fwcmd_io;
997
998 /**/
999 bool driver_going2unload;
1000
1001 /*AMPDU init min space*/
1002 u8 minspace_cfg; /*For Min spacing configurations */
1003
1004 /* Dual mac */
1005 enum macphy_mode macphymode;
1006 enum band_type current_bandtype; /* 0:2.4G, 1:5G */
1007 enum band_type current_bandtypebackup;
1008 enum band_type bandset;
1009 /* dual MAC 0--Mac0 1--Mac1 */
1010 u32 interfaceindex;
1011 /* just for DualMac S3S4 */
1012 u8 macphyctl_reg;
1013 bool earlymode_enable;
1014 /* Dual mac*/
1015 bool during_mac0init_radiob;
1016 bool during_mac1init_radioa;
1017 bool reloadtxpowerindex;
1018 /* True if IMR or IQK have done
1019 for 2.4G in scan progress */
1020 bool load_imrandiqk_setting_for2g;
1021
1022 bool disable_amsdu_8k;
866}; 1023};
867 1024
868struct rtl_security { 1025struct rtl_security {
@@ -887,48 +1044,61 @@ struct rtl_security {
887}; 1044};
888 1045
889struct rtl_dm { 1046struct rtl_dm {
890 /*PHY status for DM */ 1047 /*PHY status for Dynamic Management */
891 long entry_min_undecoratedsmoothed_pwdb; 1048 long entry_min_undecoratedsmoothed_pwdb;
892 long undecorated_smoothed_pwdb; /*out dm */ 1049 long undecorated_smoothed_pwdb; /*out dm */
893 long entry_max_undecoratedsmoothed_pwdb; 1050 long entry_max_undecoratedsmoothed_pwdb;
894 bool b_dm_initialgain_enable; 1051 bool dm_initialgain_enable;
895 bool bdynamic_txpower_enable; 1052 bool dynamic_txpower_enable;
896 bool bcurrent_turbo_edca; 1053 bool current_turbo_edca;
897 bool bis_any_nonbepkts; /*out dm */ 1054 bool is_any_nonbepkts; /*out dm */
898 bool bis_cur_rdlstate; 1055 bool is_cur_rdlstate;
899 bool btxpower_trackingInit; 1056 bool txpower_trackingInit;
900 bool b_disable_framebursting; 1057 bool disable_framebursting;
901 bool b_cck_inch14; 1058 bool cck_inch14;
902 bool btxpower_tracking; 1059 bool txpower_tracking;
903 bool b_useramask; 1060 bool useramask;
904 bool brfpath_rxenable[4]; 1061 bool rfpath_rxenable[4];
905 1062 bool inform_fw_driverctrldm;
1063 bool current_mrc_switch;
1064 u8 txpowercount;
1065
1066 u8 thermalvalue_rxgain;
906 u8 thermalvalue_iqk; 1067 u8 thermalvalue_iqk;
907 u8 thermalvalue_lck; 1068 u8 thermalvalue_lck;
908 u8 thermalvalue; 1069 u8 thermalvalue;
909 u8 last_dtp_lvl; 1070 u8 last_dtp_lvl;
1071 u8 thermalvalue_avg[AVG_THERMAL_NUM];
1072 u8 thermalvalue_avg_index;
1073 bool done_txpower;
910 u8 dynamic_txhighpower_lvl; /*Tx high power level */ 1074 u8 dynamic_txhighpower_lvl; /*Tx high power level */
911 u8 dm_flag; /*Indicate if each dynamic mechanism's status. */ 1075 u8 dm_flag; /*Indicate each dynamic mechanism's status. */
912 u8 dm_type; 1076 u8 dm_type;
913 u8 txpower_track_control; 1077 u8 txpower_track_control;
914 1078 bool interrupt_migration;
1079 bool disable_tx_int;
915 char ofdm_index[2]; 1080 char ofdm_index[2];
916 char cck_index; 1081 char cck_index;
1082 u8 power_index_backup[6];
917}; 1083};
918 1084
919#define EFUSE_MAX_LOGICAL_SIZE 128 1085#define EFUSE_MAX_LOGICAL_SIZE 256
920 1086
921struct rtl_efuse { 1087struct rtl_efuse {
922 bool bautoLoad_ok; 1088 bool autoLoad_ok;
923 bool bootfromefuse; 1089 bool bootfromefuse;
924 u16 max_physical_size; 1090 u16 max_physical_size;
925 u8 contents[EFUSE_MAX_LOGICAL_SIZE];
926 1091
927 u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE]; 1092 u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
928 u16 efuse_usedbytes; 1093 u16 efuse_usedbytes;
929 u8 efuse_usedpercentage; 1094 u8 efuse_usedpercentage;
1095#ifdef EFUSE_REPG_WORKAROUND
1096 bool efuse_re_pg_sec1flag;
1097 u8 efuse_re_pg_data[8];
1098#endif
930 1099
931 u8 autoload_failflag; 1100 u8 autoload_failflag;
1101 u8 autoload_status;
932 1102
933 short epromtype; 1103 short epromtype;
934 u16 eeprom_vid; 1104 u16 eeprom_vid;
@@ -938,69 +1108,90 @@ struct rtl_efuse {
938 u8 eeprom_oemid; 1108 u8 eeprom_oemid;
939 u16 eeprom_channelplan; 1109 u16 eeprom_channelplan;
940 u8 eeprom_version; 1110 u8 eeprom_version;
1111 u8 board_type;
1112 u8 external_pa;
941 1113
942 u8 dev_addr[6]; 1114 u8 dev_addr[6];
943 1115
944 bool b_txpwr_fromeprom; 1116 bool txpwr_fromeprom;
1117 u8 eeprom_crystalcap;
945 u8 eeprom_tssi[2]; 1118 u8 eeprom_tssi[2];
946 u8 eeprom_pwrlimit_ht20[3]; 1119 u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
947 u8 eeprom_pwrlimit_ht40[3]; 1120 u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
948 u8 eeprom_chnlarea_txpwr_cck[2][3]; 1121 u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
949 u8 eeprom_chnlarea_txpwr_ht40_1s[2][3]; 1122 u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
950 u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][3]; 1123 u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
951 u8 txpwrlevel_cck[2][14]; 1124 u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
952 u8 txpwrlevel_ht40_1s[2][14]; /*For HT 40MHZ pwr */ 1125 u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
953 u8 txpwrlevel_ht40_2s[2][14]; /*For HT 40MHZ pwr */ 1126 u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
1127 u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
1128
1129 u8 internal_pa_5g[2]; /* pathA / pathB */
1130 u8 eeprom_c9;
1131 u8 eeprom_cc;
954 1132
955 /*For power group */ 1133 /*For power group */
956 u8 pwrgroup_ht20[2][14]; 1134 u8 eeprom_pwrgroup[2][3];
957 u8 pwrgroup_ht40[2][14]; 1135 u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
958 1136 u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
959 char txpwr_ht20diff[2][14]; /*HT 20<->40 Pwr diff */ 1137
960 u8 txpwr_legacyhtdiff[2][14]; /*For HT<->legacy pwr diff */ 1138 char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
1139 /*For HT<->legacy pwr diff*/
1140 u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
1141 u8 txpwr_safetyflag; /* Band edge enable flag */
1142 u16 eeprom_txpowerdiff;
1143 u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
1144 u8 antenna_txpwdiff[3];
961 1145
962 u8 eeprom_regulatory; 1146 u8 eeprom_regulatory;
963 u8 eeprom_thermalmeter; 1147 u8 eeprom_thermalmeter;
964 /*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */ 1148 u8 thermalmeter[2]; /*ThermalMeter, index 0 for RFIC0, 1 for RFIC1 */
965 u8 thermalmeter[2]; 1149 u16 tssi_13dbm;
1150 u8 crystalcap; /* CrystalCap. */
1151 u8 delta_iqk;
1152 u8 delta_lck;
966 1153
967 u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */ 1154 u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */
968 bool b_apk_thermalmeterignore; 1155 bool apk_thermalmeterignore;
1156
1157 bool b1x1_recvcombine;
1158 bool b1ss_support;
1159
1160 /*channel plan */
1161 u8 channel_plan;
969}; 1162};
970 1163
971struct rtl_ps_ctl { 1164struct rtl_ps_ctl {
1165 bool pwrdomain_protect;
972 bool set_rfpowerstate_inprogress; 1166 bool set_rfpowerstate_inprogress;
973 bool b_in_powersavemode; 1167 bool in_powersavemode;
974 bool rfchange_inprogress; 1168 bool rfchange_inprogress;
975 bool b_swrf_processing; 1169 bool swrf_processing;
976 bool b_hwradiooff; 1170 bool hwradiooff;
977
978 u32 last_sleep_jiffies;
979 u32 last_awake_jiffies;
980 u32 last_delaylps_stamp_jiffies;
981 1171
982 /* 1172 /*
983 * just for PCIE ASPM 1173 * just for PCIE ASPM
984 * If it supports ASPM, Offset[560h] = 0x40, 1174 * If it supports ASPM, Offset[560h] = 0x40,
985 * otherwise Offset[560h] = 0x00. 1175 * otherwise Offset[560h] = 0x00.
986 * */ 1176 * */
987 bool b_support_aspm; 1177 bool support_aspm;
988 bool b_support_backdoor; 1178 bool support_backdoor;
989 1179
990 /*for LPS */ 1180 /*for LPS */
991 enum rt_psmode dot11_psmode; /*Power save mode configured. */ 1181 enum rt_psmode dot11_psmode; /*Power save mode configured. */
992 bool b_leisure_ps; 1182 bool swctrl_lps;
993 bool b_fwctrl_lps; 1183 bool leisure_ps;
1184 bool fwctrl_lps;
994 u8 fwctrl_psmode; 1185 u8 fwctrl_psmode;
995 /*For Fw control LPS mode */ 1186 /*For Fw control LPS mode */
996 u8 b_reg_fwctrl_lps; 1187 u8 reg_fwctrl_lps;
997 /*Record Fw PS mode status. */ 1188 /*Record Fw PS mode status. */
998 bool b_fw_current_inpsmode; 1189 bool fw_current_inpsmode;
999 u8 reg_max_lps_awakeintvl; 1190 u8 reg_max_lps_awakeintvl;
1000 bool report_linked; 1191 bool report_linked;
1001 1192
1002 /*for IPS */ 1193 /*for IPS */
1003 bool b_inactiveps; 1194 bool inactiveps;
1004 1195
1005 u32 rfoff_reason; 1196 u32 rfoff_reason;
1006 1197
@@ -1011,8 +1202,26 @@ struct rtl_ps_ctl {
1011 /*just for PCIE ASPM */ 1202 /*just for PCIE ASPM */
1012 u8 const_amdpci_aspm; 1203 u8 const_amdpci_aspm;
1013 1204
1205 bool pwrdown_mode;
1206
1014 enum rf_pwrstate inactive_pwrstate; 1207 enum rf_pwrstate inactive_pwrstate;
1015 enum rf_pwrstate rfpwr_state; /*cur power state */ 1208 enum rf_pwrstate rfpwr_state; /*cur power state */
1209
1210 /* for SW LPS*/
1211 bool sw_ps_enabled;
1212 bool state;
1213 bool state_inap;
1214 bool multi_buffered;
1215 u16 nullfunc_seq;
1216 unsigned int dtim_counter;
1217 unsigned int sleep_ms;
1218 unsigned long last_sleep_jiffies;
1219 unsigned long last_awake_jiffies;
1220 unsigned long last_delaylps_stamp_jiffies;
1221 unsigned long last_dtim;
1222 unsigned long last_beacon;
1223 unsigned long last_action;
1224 unsigned long last_slept;
1016}; 1225};
1017 1226
1018struct rtl_stats { 1227struct rtl_stats {
@@ -1038,10 +1247,10 @@ struct rtl_stats {
1038 s32 recvsignalpower; 1247 s32 recvsignalpower;
1039 s8 rxpower; /*in dBm Translate from PWdB */ 1248 s8 rxpower; /*in dBm Translate from PWdB */
1040 u8 signalstrength; /*in 0-100 index. */ 1249 u8 signalstrength; /*in 0-100 index. */
1041 u16 b_hwerror:1; 1250 u16 hwerror:1;
1042 u16 b_crc:1; 1251 u16 crc:1;
1043 u16 b_icv:1; 1252 u16 icv:1;
1044 u16 b_shortpreamble:1; 1253 u16 shortpreamble:1;
1045 u16 antenna:1; 1254 u16 antenna:1;
1046 u16 decrypted:1; 1255 u16 decrypted:1;
1047 u16 wakeup:1; 1256 u16 wakeup:1;
@@ -1050,15 +1259,16 @@ struct rtl_stats {
1050 1259
1051 u8 rx_drvinfo_size; 1260 u8 rx_drvinfo_size;
1052 u8 rx_bufshift; 1261 u8 rx_bufshift;
1053 bool b_isampdu; 1262 bool isampdu;
1263 bool isfirst_ampdu;
1054 bool rx_is40Mhzpacket; 1264 bool rx_is40Mhzpacket;
1055 u32 rx_pwdb_all; 1265 u32 rx_pwdb_all;
1056 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */ 1266 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
1057 s8 rx_mimo_signalquality[2]; 1267 s8 rx_mimo_signalquality[2];
1058 bool b_packet_matchbssid; 1268 bool packet_matchbssid;
1059 bool b_is_cck; 1269 bool is_cck;
1060 bool b_packet_toself; 1270 bool packet_toself;
1061 bool b_packet_beacon; /*for rssi */ 1271 bool packet_beacon; /*for rssi */
1062 char cck_adc_pwdb[4]; /*for rx path selection */ 1272 char cck_adc_pwdb[4]; /*for rx path selection */
1063}; 1273};
1064 1274
@@ -1069,23 +1279,23 @@ struct rt_link_detect {
1069 u32 num_tx_inperiod; 1279 u32 num_tx_inperiod;
1070 u32 num_rx_inperiod; 1280 u32 num_rx_inperiod;
1071 1281
1072 bool b_busytraffic; 1282 bool busytraffic;
1073 bool b_higher_busytraffic; 1283 bool higher_busytraffic;
1074 bool b_higher_busyrxtraffic; 1284 bool higher_busyrxtraffic;
1075}; 1285};
1076 1286
1077struct rtl_tcb_desc { 1287struct rtl_tcb_desc {
1078 u8 b_packet_bw:1; 1288 u8 packet_bw:1;
1079 u8 b_multicast:1; 1289 u8 multicast:1;
1080 u8 b_broadcast:1; 1290 u8 broadcast:1;
1081 1291
1082 u8 b_rts_stbc:1; 1292 u8 rts_stbc:1;
1083 u8 b_rts_enable:1; 1293 u8 rts_enable:1;
1084 u8 b_cts_enable:1; 1294 u8 cts_enable:1;
1085 u8 b_rts_use_shortpreamble:1; 1295 u8 rts_use_shortpreamble:1;
1086 u8 b_rts_use_shortgi:1; 1296 u8 rts_use_shortgi:1;
1087 u8 rts_sc:1; 1297 u8 rts_sc:1;
1088 u8 b_rts_bw:1; 1298 u8 rts_bw:1;
1089 u8 rts_rate; 1299 u8 rts_rate;
1090 1300
1091 u8 use_shortgi:1; 1301 u8 use_shortgi:1;
@@ -1096,20 +1306,34 @@ struct rtl_tcb_desc {
1096 u8 ratr_index; 1306 u8 ratr_index;
1097 u8 mac_id; 1307 u8 mac_id;
1098 u8 hw_rate; 1308 u8 hw_rate;
1309
1310 u8 last_inipkt:1;
1311 u8 cmd_or_init:1;
1312 u8 queue_index;
1313
1314 /* early mode */
1315 u8 empkt_num;
1316 /* The max value by HW */
1317 u32 empkt_len[5];
1099}; 1318};
1100 1319
1101struct rtl_hal_ops { 1320struct rtl_hal_ops {
1102 int (*init_sw_vars) (struct ieee80211_hw *hw); 1321 int (*init_sw_vars) (struct ieee80211_hw *hw);
1103 void (*deinit_sw_vars) (struct ieee80211_hw *hw); 1322 void (*deinit_sw_vars) (struct ieee80211_hw *hw);
1323 void (*read_chip_version)(struct ieee80211_hw *hw);
1104 void (*read_eeprom_info) (struct ieee80211_hw *hw); 1324 void (*read_eeprom_info) (struct ieee80211_hw *hw);
1105 void (*interrupt_recognized) (struct ieee80211_hw *hw, 1325 void (*interrupt_recognized) (struct ieee80211_hw *hw,
1106 u32 *p_inta, u32 *p_intb); 1326 u32 *p_inta, u32 *p_intb);
1107 int (*hw_init) (struct ieee80211_hw *hw); 1327 int (*hw_init) (struct ieee80211_hw *hw);
1108 void (*hw_disable) (struct ieee80211_hw *hw); 1328 void (*hw_disable) (struct ieee80211_hw *hw);
1329 void (*hw_suspend) (struct ieee80211_hw *hw);
1330 void (*hw_resume) (struct ieee80211_hw *hw);
1109 void (*enable_interrupt) (struct ieee80211_hw *hw); 1331 void (*enable_interrupt) (struct ieee80211_hw *hw);
1110 void (*disable_interrupt) (struct ieee80211_hw *hw); 1332 void (*disable_interrupt) (struct ieee80211_hw *hw);
1111 int (*set_network_type) (struct ieee80211_hw *hw, 1333 int (*set_network_type) (struct ieee80211_hw *hw,
1112 enum nl80211_iftype type); 1334 enum nl80211_iftype type);
1335 void (*set_chk_bssid)(struct ieee80211_hw *hw,
1336 bool check_bssid);
1113 void (*set_bw_mode) (struct ieee80211_hw *hw, 1337 void (*set_bw_mode) (struct ieee80211_hw *hw,
1114 enum nl80211_channel_type ch_type); 1338 enum nl80211_channel_type ch_type);
1115 u8(*switch_channel) (struct ieee80211_hw *hw); 1339 u8(*switch_channel) (struct ieee80211_hw *hw);
@@ -1126,23 +1350,26 @@ struct rtl_hal_ops {
1126 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 1350 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
1127 struct ieee80211_tx_info *info, 1351 struct ieee80211_tx_info *info,
1128 struct sk_buff *skb, unsigned int queue_index); 1352 struct sk_buff *skb, unsigned int queue_index);
1353 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 * pDesc,
1354 u32 buffer_len, bool bIsPsPoll);
1129 void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc, 1355 void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
1130 bool b_firstseg, bool b_lastseg, 1356 bool firstseg, bool lastseg,
1131 struct sk_buff *skb); 1357 struct sk_buff *skb);
1132 bool(*query_rx_desc) (struct ieee80211_hw *hw, 1358 bool (*cmd_send_packet)(struct ieee80211_hw *hw, struct sk_buff *skb);
1359 bool (*query_rx_desc) (struct ieee80211_hw *hw,
1133 struct rtl_stats *stats, 1360 struct rtl_stats *stats,
1134 struct ieee80211_rx_status *rx_status, 1361 struct ieee80211_rx_status *rx_status,
1135 u8 *pdesc, struct sk_buff *skb); 1362 u8 *pdesc, struct sk_buff *skb);
1136 void (*set_channel_access) (struct ieee80211_hw *hw); 1363 void (*set_channel_access) (struct ieee80211_hw *hw);
1137 bool(*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid); 1364 bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
1138 void (*dm_watchdog) (struct ieee80211_hw *hw); 1365 void (*dm_watchdog) (struct ieee80211_hw *hw);
1139 void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation); 1366 void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation);
1140 bool(*set_rf_power_state) (struct ieee80211_hw *hw, 1367 bool (*set_rf_power_state) (struct ieee80211_hw *hw,
1141 enum rf_pwrstate rfpwr_state); 1368 enum rf_pwrstate rfpwr_state);
1142 void (*led_control) (struct ieee80211_hw *hw, 1369 void (*led_control) (struct ieee80211_hw *hw,
1143 enum led_ctl_mode ledaction); 1370 enum led_ctl_mode ledaction);
1144 void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val); 1371 void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
1145 u32(*get_desc) (u8 *pdesc, bool istx, u8 desc_name); 1372 u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
1146 void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue); 1373 void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue);
1147 void (*enable_hw_sec) (struct ieee80211_hw *hw); 1374 void (*enable_hw_sec) (struct ieee80211_hw *hw);
1148 void (*set_key) (struct ieee80211_hw *hw, u32 key_index, 1375 void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1150,22 +1377,36 @@ struct rtl_hal_ops {
1150 bool is_wepkey, bool clear_all); 1377 bool is_wepkey, bool clear_all);
1151 void (*init_sw_leds) (struct ieee80211_hw *hw); 1378 void (*init_sw_leds) (struct ieee80211_hw *hw);
1152 void (*deinit_sw_leds) (struct ieee80211_hw *hw); 1379 void (*deinit_sw_leds) (struct ieee80211_hw *hw);
1153 u32(*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); 1380 u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
1154 void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, 1381 void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
1155 u32 data); 1382 u32 data);
1156 u32(*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1383 u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1157 u32 regaddr, u32 bitmask); 1384 u32 regaddr, u32 bitmask);
1158 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1385 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1159 u32 regaddr, u32 bitmask, u32 data); 1386 u32 regaddr, u32 bitmask, u32 data);
1387 bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
1388 void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
1389 u8 *powerlevel);
1390 void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw,
1391 u8 *ppowerlevel, u8 channel);
1392 bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw,
1393 u8 configtype);
1394 bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw,
1395 u8 configtype);
1396 void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
1397 void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
1398 void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
1160}; 1399};
1161 1400
1162struct rtl_intf_ops { 1401struct rtl_intf_ops {
1163 /*com */ 1402 /*com */
1403 void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
1164 int (*adapter_start) (struct ieee80211_hw *hw); 1404 int (*adapter_start) (struct ieee80211_hw *hw);
1165 void (*adapter_stop) (struct ieee80211_hw *hw); 1405 void (*adapter_stop) (struct ieee80211_hw *hw);
1166 1406
1167 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb); 1407 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb);
1168 int (*reset_trx_ring) (struct ieee80211_hw *hw); 1408 int (*reset_trx_ring) (struct ieee80211_hw *hw);
1409 bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
1169 1410
1170 /*pci */ 1411 /*pci */
1171 void (*disable_aspm) (struct ieee80211_hw *hw); 1412 void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1179,11 +1420,36 @@ struct rtl_mod_params {
1179 int sw_crypto; 1420 int sw_crypto;
1180}; 1421};
1181 1422
1423struct rtl_hal_usbint_cfg {
1424 /* data - rx */
1425 u32 in_ep_num;
1426 u32 rx_urb_num;
1427 u32 rx_max_size;
1428
1429 /* op - rx */
1430 void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
1431 void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
1432 struct sk_buff_head *);
1433
1434 /* tx */
1435 void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
1436 int (*usb_tx_post_hdl)(struct ieee80211_hw *, struct urb *,
1437 struct sk_buff *);
1438 struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
1439 struct sk_buff_head *);
1440
1441 /* endpoint mapping */
1442 int (*usb_endpoint_mapping)(struct ieee80211_hw *hw);
1443 u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
1444};
1445
1182struct rtl_hal_cfg { 1446struct rtl_hal_cfg {
1447 u8 bar_id;
1183 char *name; 1448 char *name;
1184 char *fw_name; 1449 char *fw_name;
1185 struct rtl_hal_ops *ops; 1450 struct rtl_hal_ops *ops;
1186 struct rtl_mod_params *mod_params; 1451 struct rtl_mod_params *mod_params;
1452 struct rtl_hal_usbint_cfg *usb_interface_cfg;
1187 1453
1188 /*this map used for some registers or vars 1454 /*this map used for some registers or vars
1189 defined int HAL but used in MAIN */ 1455 defined int HAL but used in MAIN */
@@ -1202,6 +1468,11 @@ struct rtl_locks {
1202 spinlock_t rf_ps_lock; 1468 spinlock_t rf_ps_lock;
1203 spinlock_t rf_lock; 1469 spinlock_t rf_lock;
1204 spinlock_t lps_lock; 1470 spinlock_t lps_lock;
1471 spinlock_t waitq_lock;
1472 spinlock_t tx_urb_lock;
1473
1474 /*Dual mac*/
1475 spinlock_t cck_and_rw_pagea_lock;
1205}; 1476};
1206 1477
1207struct rtl_works { 1478struct rtl_works {
@@ -1218,12 +1489,20 @@ struct rtl_works {
1218 struct workqueue_struct *rtl_wq; 1489 struct workqueue_struct *rtl_wq;
1219 struct delayed_work watchdog_wq; 1490 struct delayed_work watchdog_wq;
1220 struct delayed_work ips_nic_off_wq; 1491 struct delayed_work ips_nic_off_wq;
1492
1493 /* For SW LPS */
1494 struct delayed_work ps_work;
1495 struct delayed_work ps_rfon_wq;
1221}; 1496};
1222 1497
1223struct rtl_debug { 1498struct rtl_debug {
1224 u32 dbgp_type[DBGP_TYPE_MAX]; 1499 u32 dbgp_type[DBGP_TYPE_MAX];
1225 u32 global_debuglevel; 1500 u32 global_debuglevel;
1226 u64 global_debugcomponents; 1501 u64 global_debugcomponents;
1502
1503 /* add for proc debug */
1504 struct proc_dir_entry *proc_dir;
1505 char proc_name[20];
1227}; 1506};
1228 1507
1229struct rtl_priv { 1508struct rtl_priv {
@@ -1274,6 +1553,91 @@ struct rtl_priv {
1274#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse)) 1553#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
1275#define rtl_psc(rtlpriv) (&((rtlpriv)->psc)) 1554#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
1276 1555
1556
1557/***************************************
1558 Bluetooth Co-existance Related
1559****************************************/
1560
1561enum bt_ant_num {
1562 ANT_X2 = 0,
1563 ANT_X1 = 1,
1564};
1565
1566enum bt_co_type {
1567 BT_2WIRE = 0,
1568 BT_ISSC_3WIRE = 1,
1569 BT_ACCEL = 2,
1570 BT_CSR_BC4 = 3,
1571 BT_CSR_BC8 = 4,
1572 BT_RTL8756 = 5,
1573};
1574
1575enum bt_cur_state {
1576 BT_OFF = 0,
1577 BT_ON = 1,
1578};
1579
1580enum bt_service_type {
1581 BT_SCO = 0,
1582 BT_A2DP = 1,
1583 BT_HID = 2,
1584 BT_HID_IDLE = 3,
1585 BT_SCAN = 4,
1586 BT_IDLE = 5,
1587 BT_OTHER_ACTION = 6,
1588 BT_BUSY = 7,
1589 BT_OTHERBUSY = 8,
1590 BT_PAN = 9,
1591};
1592
1593enum bt_radio_shared {
1594 BT_RADIO_SHARED = 0,
1595 BT_RADIO_INDIVIDUAL = 1,
1596};
1597
1598struct bt_coexist_info {
1599
1600 /* EEPROM BT info. */
1601 u8 eeprom_bt_coexist;
1602 u8 eeprom_bt_type;
1603 u8 eeprom_bt_ant_num;
1604 u8 eeprom_bt_ant_isolation;
1605 u8 eeprom_bt_radio_shared;
1606
1607 u8 bt_coexistence;
1608 u8 bt_ant_num;
1609 u8 bt_coexist_type;
1610 u8 bt_state;
1611 u8 bt_cur_state; /* 0:on, 1:off */
1612 u8 bt_ant_isolation; /* 0:good, 1:bad */
1613 u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
1614 u8 bt_service;
1615 u8 bt_radio_shared_type;
1616 u8 bt_rfreg_origin_1e;
1617 u8 bt_rfreg_origin_1f;
1618 u8 bt_rssi_state;
1619 u32 ratio_tx;
1620 u32 ratio_pri;
1621 u32 bt_edca_ul;
1622 u32 bt_edca_dl;
1623
1624 bool b_init_set;
1625 bool b_bt_busy_traffic;
1626 bool b_bt_traffic_mode_set;
1627 bool b_bt_non_traffic_mode_set;
1628
1629 bool b_fw_coexist_all_off;
1630 bool b_sw_coexist_all_off;
1631 u32 current_state;
1632 u32 previous_state;
1633 u8 bt_pre_rssi_state;
1634
1635 u8 b_reg_bt_iso;
1636 u8 b_reg_bt_sco;
1637
1638};
1639
1640
1277/**************************************** 1641/****************************************
1278 mem access macro define start 1642 mem access macro define start
1279 Call endian free function when 1643 Call endian free function when
@@ -1281,7 +1645,7 @@ struct rtl_priv {
1281 2. Before write integer to IO. 1645 2. Before write integer to IO.
1282 3. After read integer from IO. 1646 3. After read integer from IO.
1283****************************************/ 1647****************************************/
1284/* Convert little data endian to host */ 1648/* Convert little data endian to host ordering */
1285#define EF1BYTE(_val) \ 1649#define EF1BYTE(_val) \
1286 ((u8)(_val)) 1650 ((u8)(_val))
1287#define EF2BYTE(_val) \ 1651#define EF2BYTE(_val) \
@@ -1289,27 +1653,21 @@ struct rtl_priv {
1289#define EF4BYTE(_val) \ 1653#define EF4BYTE(_val) \
1290 (le32_to_cpu(_val)) 1654 (le32_to_cpu(_val))
1291 1655
1292/* Read data from memory */ 1656/* Read le16 data from memory and convert to host ordering */
1293#define READEF1BYTE(_ptr) \
1294 EF1BYTE(*((u8 *)(_ptr)))
1295#define READEF2BYTE(_ptr) \ 1657#define READEF2BYTE(_ptr) \
1296 EF2BYTE(*((u16 *)(_ptr))) 1658 EF2BYTE(*((u16 *)(_ptr)))
1297#define READEF4BYTE(_ptr) \
1298 EF4BYTE(*((u32 *)(_ptr)))
1299 1659
1300/* Write data to memory */ 1660/* Write le16 data to memory in host ordering */
1301#define WRITEEF1BYTE(_ptr, _val) \
1302 (*((u8 *)(_ptr))) = EF1BYTE(_val)
1303#define WRITEEF2BYTE(_ptr, _val) \ 1661#define WRITEEF2BYTE(_ptr, _val) \
1304 (*((u16 *)(_ptr))) = EF2BYTE(_val) 1662 (*((u16 *)(_ptr))) = EF2BYTE(_val)
1305#define WRITEEF4BYTE(_ptr, _val) \ 1663
1306 (*((u32 *)(_ptr))) = EF4BYTE(_val) 1664/* Create a bit mask
1307 1665 * Examples:
1308/*Example: 1666 * BIT_LEN_MASK_32(0) => 0x00000000
1309BIT_LEN_MASK_32(0) => 0x00000000 1667 * BIT_LEN_MASK_32(1) => 0x00000001
1310BIT_LEN_MASK_32(1) => 0x00000001 1668 * BIT_LEN_MASK_32(2) => 0x00000003
1311BIT_LEN_MASK_32(2) => 0x00000003 1669 * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
1312BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/ 1670 */
1313#define BIT_LEN_MASK_32(__bitlen) \ 1671#define BIT_LEN_MASK_32(__bitlen) \
1314 (0xFFFFFFFF >> (32 - (__bitlen))) 1672 (0xFFFFFFFF >> (32 - (__bitlen)))
1315#define BIT_LEN_MASK_16(__bitlen) \ 1673#define BIT_LEN_MASK_16(__bitlen) \
@@ -1317,9 +1675,11 @@ BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
1317#define BIT_LEN_MASK_8(__bitlen) \ 1675#define BIT_LEN_MASK_8(__bitlen) \
1318 (0xFF >> (8 - (__bitlen))) 1676 (0xFF >> (8 - (__bitlen)))
1319 1677
1320/*Example: 1678/* Create an offset bit mask
1321BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003 1679 * Examples:
1322BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/ 1680 * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
1681 * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
1682 */
1323#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \ 1683#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
1324 (BIT_LEN_MASK_32(__bitlen) << (__bitoffset)) 1684 (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
1325#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \ 1685#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
@@ -1328,8 +1688,9 @@ BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
1328 (BIT_LEN_MASK_8(__bitlen) << (__bitoffset)) 1688 (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
1329 1689
1330/*Description: 1690/*Description:
1331Return 4-byte value in host byte ordering from 1691 * Return 4-byte value in host byte ordering from
13324-byte pointer in little-endian system.*/ 1692 * 4-byte pointer in little-endian system.
1693 */
1333#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \ 1694#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
1334 (EF4BYTE(*((u32 *)(__pstart)))) 1695 (EF4BYTE(*((u32 *)(__pstart))))
1335#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \ 1696#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
@@ -1337,28 +1698,10 @@ Return 4-byte value in host byte ordering from
1337#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \ 1698#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
1338 (EF1BYTE(*((u8 *)(__pstart)))) 1699 (EF1BYTE(*((u8 *)(__pstart))))
1339 1700
1340/*Description: 1701/* Description:
1341Translate subfield (continuous bits in little-endian) of 4-byte 1702 * Mask subfield (continuous bits in little-endian) of 4-byte value
1342value to host byte ordering.*/ 1703 * and return the result in 4-byte value in host byte ordering.
1343#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ 1704 */
1344 ( \
1345 (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
1346 BIT_LEN_MASK_32(__bitlen) \
1347 )
1348#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
1349 ( \
1350 (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
1351 BIT_LEN_MASK_16(__bitlen) \
1352 )
1353#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
1354 ( \
1355 (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
1356 BIT_LEN_MASK_8(__bitlen) \
1357 )
1358
1359/*Description:
1360Mask subfield (continuous bits in little-endian) of 4-byte value
1361and return the result in 4-byte value in host byte ordering.*/
1362#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ 1705#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
1363 ( \ 1706 ( \
1364 LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \ 1707 LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
@@ -1375,20 +1718,9 @@ and return the result in 4-byte value in host byte ordering.*/
1375 (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \ 1718 (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
1376 ) 1719 )
1377 1720
1378/*Description: 1721/* Description:
1379Set subfield of little-endian 4-byte value to specified value. */ 1722 * Set subfield of little-endian 4-byte value to specified value.
1380#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \ 1723 */
1381 *((u32 *)(__pstart)) = EF4BYTE \
1382 ( \
1383 LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
1384 ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
1385 );
1386#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
1387 *((u16 *)(__pstart)) = EF2BYTE \
1388 ( \
1389 LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
1390 ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
1391 );
1392#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \ 1724#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
1393 *((u8 *)(__pstart)) = EF1BYTE \ 1725 *((u8 *)(__pstart)) = EF1BYTE \
1394 ( \ 1726 ( \
@@ -1400,13 +1732,14 @@ Set subfield of little-endian 4-byte value to specified value. */
1400 mem access macro define end 1732 mem access macro define end
1401****************************************/ 1733****************************************/
1402 1734
1403#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC) 1735#define byte(x, n) ((x >> (8 * n)) & 0xff)
1736
1404#define RTL_WATCH_DOG_TIME 2000 1737#define RTL_WATCH_DOG_TIME 2000
1405#define MSECS(t) msecs_to_jiffies(t) 1738#define MSECS(t) msecs_to_jiffies(t)
1406#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS) 1739#define WLAN_FC_GET_VERS(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_VERS)
1407#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE) 1740#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
1408#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE) 1741#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
1409#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA) 1742#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
1410#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 1743#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
1411#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 1744#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
1412#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) 1745#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
@@ -1420,6 +1753,8 @@ Set subfield of little-endian 4-byte value to specified value. */
1420#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */ 1753#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */
1421/*Always enable ASPM and Clock Req in initialization.*/ 1754/*Always enable ASPM and Clock Req in initialization.*/
1422#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) 1755#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
1756/* no matter RFOFF or SLEEP we set PS_ASPM_LEVL*/
1757#define RT_PS_LEVEL_ASPM BIT(7)
1423/*When LPS is on, disable 2R if no packet is received or transmittd.*/ 1758/*When LPS is on, disable 2R if no packet is received or transmittd.*/
1424#define RT_RF_LPS_DISALBE_2R BIT(30) 1759#define RT_RF_LPS_DISALBE_2R BIT(30)
1425#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */ 1760#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
@@ -1433,15 +1768,6 @@ Set subfield of little-endian 4-byte value to specified value. */
1433#define container_of_dwork_rtl(x, y, z) \ 1768#define container_of_dwork_rtl(x, y, z) \
1434 container_of(container_of(x, struct delayed_work, work), y, z) 1769 container_of(container_of(x, struct delayed_work, work), y, z)
1435 1770
1436#define FILL_OCTET_STRING(_os, _octet, _len) \
1437 (_os).octet = (u8 *)(_octet); \
1438 (_os).length = (_len);
1439
1440#define CP_MACADDR(des, src) \
1441 ((des)[0] = (src)[0], (des)[1] = (src)[1],\
1442 (des)[2] = (src)[2], (des)[3] = (src)[3],\
1443 (des)[4] = (src)[4], (des)[5] = (src)[5])
1444
1445static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr) 1771static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
1446{ 1772{
1447 return rtlpriv->io.read8_sync(rtlpriv, addr); 1773 return rtlpriv->io.read8_sync(rtlpriv, addr);
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index 64a0214cfb29..ef8370edace7 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -776,6 +776,31 @@ out:
776 return ret; 776 return ret;
777} 777}
778 778
779int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
780 u8 depth, enum wl1251_acx_low_rssi_type type)
781{
782 struct acx_low_rssi *rssi;
783 int ret;
784
785 wl1251_debug(DEBUG_ACX, "acx low rssi");
786
787 rssi = kzalloc(sizeof(*rssi), GFP_KERNEL);
788 if (!rssi)
789 return -ENOMEM;
790
791 rssi->threshold = threshold;
792 rssi->weight = weight;
793 rssi->depth = depth;
794 rssi->type = type;
795
796 ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi));
797 if (ret < 0)
798 wl1251_warning("failed to set low rssi threshold: %d", ret);
799
800 kfree(rssi);
801 return ret;
802}
803
779int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble) 804int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
780{ 805{
781 struct acx_preamble *acx; 806 struct acx_preamble *acx;
@@ -978,6 +1003,34 @@ out:
978 return ret; 1003 return ret;
979} 1004}
980 1005
1006int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
1007 u8 max_consecutive)
1008{
1009 struct wl1251_acx_bet_enable *acx;
1010 int ret;
1011
1012 wl1251_debug(DEBUG_ACX, "acx bet enable");
1013
1014 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1015 if (!acx) {
1016 ret = -ENOMEM;
1017 goto out;
1018 }
1019
1020 acx->enable = mode;
1021 acx->max_consecutive = max_consecutive;
1022
1023 ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
1024 if (ret < 0) {
1025 wl1251_warning("wl1251 acx bet enable failed: %d", ret);
1026 goto out;
1027 }
1028
1029out:
1030 kfree(acx);
1031 return ret;
1032}
1033
981int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max, 1034int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
982 u8 aifs, u16 txop) 1035 u8 aifs, u16 txop)
983{ 1036{
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index efcc3aaca14f..c2ba100f9b1a 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -399,6 +399,49 @@ struct acx_rts_threshold {
399 u8 pad[2]; 399 u8 pad[2];
400} __packed; 400} __packed;
401 401
402enum wl1251_acx_low_rssi_type {
403 /*
404 * The event is a "Level" indication which keeps triggering
405 * as long as the average RSSI is below the threshold.
406 */
407 WL1251_ACX_LOW_RSSI_TYPE_LEVEL = 0,
408
409 /*
410 * The event is an "Edge" indication which triggers
411 * only when the RSSI threshold is crossed from above.
412 */
413 WL1251_ACX_LOW_RSSI_TYPE_EDGE = 1,
414};
415
416struct acx_low_rssi {
417 struct acx_header header;
418
419 /*
420 * The threshold (in dBm) below (or above after low rssi
421 * indication) which the firmware generates an interrupt to the
422 * host. This parameter is signed.
423 */
424 s8 threshold;
425
426 /*
427 * The weight of the current RSSI sample, before adding the new
428 * sample, that is used to calculate the average RSSI.
429 */
430 u8 weight;
431
432 /*
433 * The number of Beacons/Probe response frames that will be
434 * received before issuing the Low or Regained RSSI event.
435 */
436 u8 depth;
437
438 /*
439 * Configures how the Low RSSI Event is triggered. Refer to
440 * enum wl1251_acx_low_rssi_type for more.
441 */
442 u8 type;
443} __packed;
444
402struct acx_beacon_filter_option { 445struct acx_beacon_filter_option {
403 struct acx_header header; 446 struct acx_header header;
404 447
@@ -1164,6 +1207,31 @@ struct wl1251_acx_wr_tbtt_and_dtim {
1164 u8 padding; 1207 u8 padding;
1165} __packed; 1208} __packed;
1166 1209
1210enum wl1251_acx_bet_mode {
1211 WL1251_ACX_BET_DISABLE = 0,
1212 WL1251_ACX_BET_ENABLE = 1,
1213};
1214
1215struct wl1251_acx_bet_enable {
1216 struct acx_header header;
1217
1218 /*
1219 * Specifies if beacon early termination procedure is enabled or
1220 * disabled, see enum wl1251_acx_bet_mode.
1221 */
1222 u8 enable;
1223
1224 /*
1225 * Specifies the maximum number of consecutive beacons that may be
1226 * early terminated. After this number is reached at least one full
1227 * beacon must be correctly received in FW before beacon ET
1228 * resumes. Range 0 - 255.
1229 */
1230 u8 max_consecutive;
1231
1232 u8 padding[2];
1233} __packed;
1234
1167struct wl1251_acx_ac_cfg { 1235struct wl1251_acx_ac_cfg {
1168 struct acx_header header; 1236 struct acx_header header;
1169 1237
@@ -1393,6 +1461,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl);
1393int wl1251_acx_bcn_dtim_options(struct wl1251 *wl); 1461int wl1251_acx_bcn_dtim_options(struct wl1251 *wl);
1394int wl1251_acx_aid(struct wl1251 *wl, u16 aid); 1462int wl1251_acx_aid(struct wl1251 *wl, u16 aid);
1395int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask); 1463int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask);
1464int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
1465 u8 depth, enum wl1251_acx_low_rssi_type type);
1396int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble); 1466int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble);
1397int wl1251_acx_cts_protect(struct wl1251 *wl, 1467int wl1251_acx_cts_protect(struct wl1251 *wl,
1398 enum acx_ctsprotect_type ctsprotect); 1468 enum acx_ctsprotect_type ctsprotect);
@@ -1401,6 +1471,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
1401int wl1251_acx_rate_policies(struct wl1251 *wl); 1471int wl1251_acx_rate_policies(struct wl1251 *wl);
1402int wl1251_acx_mem_cfg(struct wl1251 *wl); 1472int wl1251_acx_mem_cfg(struct wl1251 *wl);
1403int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); 1473int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
1474int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
1475 u8 max_consecutive);
1404int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max, 1476int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
1405 u8 aifs, u16 txop); 1477 u8 aifs, u16 txop);
1406int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue, 1478int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/wl1251/event.c
index 712372e50a87..dfc4579acb06 100644
--- a/drivers/net/wireless/wl1251/event.c
+++ b/drivers/net/wireless/wl1251/event.c
@@ -90,6 +90,24 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
90 } 90 }
91 } 91 }
92 92
93 if (wl->vif && wl->rssi_thold) {
94 if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
95 wl1251_debug(DEBUG_EVENT,
96 "ROAMING_TRIGGER_LOW_RSSI_EVENT");
97 ieee80211_cqm_rssi_notify(wl->vif,
98 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
99 GFP_KERNEL);
100 }
101
102 if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
103 wl1251_debug(DEBUG_EVENT,
104 "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
105 ieee80211_cqm_rssi_notify(wl->vif,
106 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
107 GFP_KERNEL);
108 }
109 }
110
93 return 0; 111 return 0;
94} 112}
95 113
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 40372bac9482..12c9e635a6d6 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -375,7 +375,7 @@ out:
375 mutex_unlock(&wl->mutex); 375 mutex_unlock(&wl->mutex);
376} 376}
377 377
378static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 378static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
379{ 379{
380 struct wl1251 *wl = hw->priv; 380 struct wl1251 *wl = hw->priv;
381 unsigned long flags; 381 unsigned long flags;
@@ -401,8 +401,6 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
401 wl->tx_queue_stopped = true; 401 wl->tx_queue_stopped = true;
402 spin_unlock_irqrestore(&wl->wl_lock, flags); 402 spin_unlock_irqrestore(&wl->wl_lock, flags);
403 } 403 }
404
405 return NETDEV_TX_OK;
406} 404}
407 405
408static int wl1251_op_start(struct ieee80211_hw *hw) 406static int wl1251_op_start(struct ieee80211_hw *hw)
@@ -502,6 +500,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
502 wl->psm = 0; 500 wl->psm = 0;
503 wl->tx_queue_stopped = false; 501 wl->tx_queue_stopped = false;
504 wl->power_level = WL1251_DEFAULT_POWER_LEVEL; 502 wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
503 wl->rssi_thold = 0;
505 wl->channel = WL1251_DEFAULT_CHANNEL; 504 wl->channel = WL1251_DEFAULT_CHANNEL;
506 505
507 wl1251_debugfs_reset(wl); 506 wl1251_debugfs_reset(wl);
@@ -959,6 +958,16 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
959 if (ret < 0) 958 if (ret < 0)
960 goto out; 959 goto out;
961 960
961 if (changed & BSS_CHANGED_CQM) {
962 ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
963 WL1251_DEFAULT_LOW_RSSI_WEIGHT,
964 WL1251_DEFAULT_LOW_RSSI_DEPTH,
965 WL1251_ACX_LOW_RSSI_TYPE_EDGE);
966 if (ret < 0)
967 goto out;
968 wl->rssi_thold = bss_conf->cqm_rssi_thold;
969 }
970
962 if (changed & BSS_CHANGED_BSSID) { 971 if (changed & BSS_CHANGED_BSSID) {
963 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 972 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
964 973
@@ -1313,9 +1322,11 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1313 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1322 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1314 IEEE80211_HW_SUPPORTS_PS | 1323 IEEE80211_HW_SUPPORTS_PS |
1315 IEEE80211_HW_BEACON_FILTER | 1324 IEEE80211_HW_BEACON_FILTER |
1316 IEEE80211_HW_SUPPORTS_UAPSD; 1325 IEEE80211_HW_SUPPORTS_UAPSD |
1326 IEEE80211_HW_SUPPORTS_CQM_RSSI;
1317 1327
1318 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1328 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1329 BIT(NL80211_IFTYPE_ADHOC);
1319 wl->hw->wiphy->max_scan_ssids = 1; 1330 wl->hw->wiphy->max_scan_ssids = 1;
1320 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; 1331 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
1321 1332
@@ -1377,6 +1388,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
1377 wl->psm_requested = false; 1388 wl->psm_requested = false;
1378 wl->tx_queue_stopped = false; 1389 wl->tx_queue_stopped = false;
1379 wl->power_level = WL1251_DEFAULT_POWER_LEVEL; 1390 wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
1391 wl->rssi_thold = 0;
1380 wl->beacon_int = WL1251_DEFAULT_BEACON_INT; 1392 wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
1381 wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD; 1393 wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
1382 wl->vif = NULL; 1394 wl->vif = NULL;
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/wl1251/ps.c
index 5ed47c8373d2..9cc514703d2a 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/wl1251/ps.c
@@ -58,7 +58,6 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
58 unsigned long delay; 58 unsigned long delay;
59 59
60 if (wl->psm) { 60 if (wl->psm) {
61 cancel_delayed_work(&wl->elp_work);
62 delay = msecs_to_jiffies(ELP_ENTRY_DELAY); 61 delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
63 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay); 62 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
64 } 63 }
@@ -69,6 +68,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
69 unsigned long timeout, start; 68 unsigned long timeout, start;
70 u32 elp_reg; 69 u32 elp_reg;
71 70
71 if (delayed_work_pending(&wl->elp_work))
72 cancel_delayed_work(&wl->elp_work);
73
72 if (!wl->elp) 74 if (!wl->elp)
73 return 0; 75 return 0;
74 76
@@ -102,38 +104,6 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
102 return 0; 104 return 0;
103} 105}
104 106
105static int wl1251_ps_set_elp(struct wl1251 *wl, bool enable)
106{
107 int ret;
108
109 if (enable) {
110 wl1251_debug(DEBUG_PSM, "sleep auth psm/elp");
111
112 ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
113 if (ret < 0)
114 return ret;
115
116 wl1251_ps_elp_sleep(wl);
117 } else {
118 wl1251_debug(DEBUG_PSM, "sleep auth cam");
119
120 /*
121 * When the target is in ELP, we can only
122 * access the ELP control register. Thus,
123 * we have to wake the target up before
124 * changing the power authorization.
125 */
126
127 wl1251_ps_elp_wakeup(wl);
128
129 ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
130 if (ret < 0)
131 return ret;
132 }
133
134 return 0;
135}
136
137int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode) 107int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
138{ 108{
139 int ret; 109 int ret;
@@ -153,11 +123,16 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
153 if (ret < 0) 123 if (ret < 0)
154 return ret; 124 return ret;
155 125
126 ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
127 WL1251_DEFAULT_BET_CONSECUTIVE);
128 if (ret < 0)
129 return ret;
130
156 ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); 131 ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
157 if (ret < 0) 132 if (ret < 0)
158 return ret; 133 return ret;
159 134
160 ret = wl1251_ps_set_elp(wl, true); 135 ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
161 if (ret < 0) 136 if (ret < 0)
162 return ret; 137 return ret;
163 138
@@ -166,7 +141,14 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
166 case STATION_ACTIVE_MODE: 141 case STATION_ACTIVE_MODE:
167 default: 142 default:
168 wl1251_debug(DEBUG_PSM, "leaving psm"); 143 wl1251_debug(DEBUG_PSM, "leaving psm");
169 ret = wl1251_ps_set_elp(wl, false); 144
145 ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
146 if (ret < 0)
147 return ret;
148
149 /* disable BET */
150 ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
151 WL1251_DEFAULT_BET_CONSECUTIVE);
170 if (ret < 0) 152 if (ret < 0)
171 return ret; 153 return ret;
172 154
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/wl1251/rx.c
index efa53607d5c9..c1b3b3f03da2 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/wl1251/rx.c
@@ -78,9 +78,10 @@ static void wl1251_rx_status(struct wl1251 *wl,
78 */ 78 */
79 wl->noise = desc->rssi - desc->snr / 2; 79 wl->noise = desc->rssi - desc->snr / 2;
80 80
81 status->freq = ieee80211_channel_to_frequency(desc->channel); 81 status->freq = ieee80211_channel_to_frequency(desc->channel,
82 status->band);
82 83
83 status->flag |= RX_FLAG_TSFT; 84 status->flag |= RX_FLAG_MACTIME_MPDU;
84 85
85 if (desc->flags & RX_DESC_ENCRYPTION_MASK) { 86 if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
86 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 87 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -95,8 +96,52 @@ static void wl1251_rx_status(struct wl1251 *wl,
95 if (unlikely(!(desc->flags & RX_DESC_VALID_FCS))) 96 if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
96 status->flag |= RX_FLAG_FAILED_FCS_CRC; 97 status->flag |= RX_FLAG_FAILED_FCS_CRC;
97 98
99 switch (desc->rate) {
100 /* skip 1 and 12 Mbps because they have same value 0x0a */
101 case RATE_2MBPS:
102 status->rate_idx = 1;
103 break;
104 case RATE_5_5MBPS:
105 status->rate_idx = 2;
106 break;
107 case RATE_11MBPS:
108 status->rate_idx = 3;
109 break;
110 case RATE_6MBPS:
111 status->rate_idx = 4;
112 break;
113 case RATE_9MBPS:
114 status->rate_idx = 5;
115 break;
116 case RATE_18MBPS:
117 status->rate_idx = 7;
118 break;
119 case RATE_24MBPS:
120 status->rate_idx = 8;
121 break;
122 case RATE_36MBPS:
123 status->rate_idx = 9;
124 break;
125 case RATE_48MBPS:
126 status->rate_idx = 10;
127 break;
128 case RATE_54MBPS:
129 status->rate_idx = 11;
130 break;
131 }
132
133 /* for 1 and 12 Mbps we have to check the modulation */
134 if (desc->rate == RATE_1MBPS) {
135 if (!(desc->mod_pre & OFDM_RATE_BIT))
136 /* CCK -> RATE_1MBPS */
137 status->rate_idx = 0;
138 else
139 /* OFDM -> RATE_12MBPS */
140 status->rate_idx = 6;
141 }
98 142
99 /* FIXME: set status->rate_idx */ 143 if (desc->mod_pre & SHORT_PREAMBLE_BIT)
144 status->flag |= RX_FLAG_SHORTPRE;
100} 145}
101 146
102static void wl1251_rx_body(struct wl1251 *wl, 147static void wl1251_rx_body(struct wl1251 *wl,
diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/wl1251/tx.c
index 554b4f9a3d3e..28121c590a2b 100644
--- a/drivers/net/wireless/wl1251/tx.c
+++ b/drivers/net/wireless/wl1251/tx.c
@@ -213,16 +213,30 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
213 wl1251_debug(DEBUG_TX, "skb offset %d", offset); 213 wl1251_debug(DEBUG_TX, "skb offset %d", offset);
214 214
215 /* check whether the current skb can be used */ 215 /* check whether the current skb can be used */
216 if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { 216 if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
217 unsigned char *src = skb->data; 217 struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
218 GFP_KERNEL);
219
220 if (unlikely(newskb == NULL)) {
221 wl1251_error("Can't allocate skb!");
222 return -EINVAL;
223 }
218 224
219 /* align the buffer on a 4-byte boundary */ 225 tx_hdr = (struct tx_double_buffer_desc *) newskb->data;
226
227 dev_kfree_skb_any(skb);
228 wl->tx_frames[tx_hdr->id] = skb = newskb;
229
230 offset = (4 - (long)skb->data) & 0x03;
231 wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
232 }
233
234 /* align the buffer on a 4-byte boundary */
235 if (offset) {
236 unsigned char *src = skb->data;
220 skb_reserve(skb, offset); 237 skb_reserve(skb, offset);
221 memmove(skb->data, src, skb->len); 238 memmove(skb->data, src, skb->len);
222 tx_hdr = (struct tx_double_buffer_desc *) skb->data; 239 tx_hdr = (struct tx_double_buffer_desc *) skb->data;
223 } else {
224 wl1251_info("No handler, fixme!");
225 return -EINVAL;
226 } 240 }
227 } 241 }
228 242
@@ -368,7 +382,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
368{ 382{
369 struct ieee80211_tx_info *info; 383 struct ieee80211_tx_info *info;
370 struct sk_buff *skb; 384 struct sk_buff *skb;
371 int hdrlen, ret; 385 int hdrlen;
372 u8 *frame; 386 u8 *frame;
373 387
374 skb = wl->tx_frames[result->id]; 388 skb = wl->tx_frames[result->id];
@@ -407,40 +421,12 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
407 ieee80211_tx_status(wl->hw, skb); 421 ieee80211_tx_status(wl->hw, skb);
408 422
409 wl->tx_frames[result->id] = NULL; 423 wl->tx_frames[result->id] = NULL;
410
411 if (wl->tx_queue_stopped) {
412 wl1251_debug(DEBUG_TX, "cb: queue was stopped");
413
414 skb = skb_dequeue(&wl->tx_queue);
415
416 /* The skb can be NULL because tx_work might have been
417 scheduled before the queue was stopped making the
418 queue empty */
419
420 if (skb) {
421 ret = wl1251_tx_frame(wl, skb);
422 if (ret == -EBUSY) {
423 /* firmware buffer is still full */
424 wl1251_debug(DEBUG_TX, "cb: fw buffer "
425 "still full");
426 skb_queue_head(&wl->tx_queue, skb);
427 return;
428 } else if (ret < 0) {
429 dev_kfree_skb(skb);
430 return;
431 }
432 }
433
434 wl1251_debug(DEBUG_TX, "cb: waking queues");
435 ieee80211_wake_queues(wl->hw);
436 wl->tx_queue_stopped = false;
437 }
438} 424}
439 425
440/* Called upon reception of a TX complete interrupt */ 426/* Called upon reception of a TX complete interrupt */
441void wl1251_tx_complete(struct wl1251 *wl) 427void wl1251_tx_complete(struct wl1251 *wl)
442{ 428{
443 int i, result_index, num_complete = 0; 429 int i, result_index, num_complete = 0, queue_len;
444 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr; 430 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
445 unsigned long flags; 431 unsigned long flags;
446 432
@@ -471,18 +457,22 @@ void wl1251_tx_complete(struct wl1251 *wl)
471 } 457 }
472 } 458 }
473 459
474 if (wl->tx_queue_stopped 460 queue_len = skb_queue_len(&wl->tx_queue);
475 &&
476 skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK){
477 461
478 /* firmware buffer has space, restart queues */ 462 if ((num_complete > 0) && (queue_len > 0)) {
463 /* firmware buffer has space, reschedule tx_work */
464 wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
465 ieee80211_queue_work(wl->hw, &wl->tx_work);
466 }
467
468 if (wl->tx_queue_stopped &&
469 queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
470 /* tx_queue has space, restart queues */
479 wl1251_debug(DEBUG_TX, "tx_complete: waking queues"); 471 wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
480 spin_lock_irqsave(&wl->wl_lock, flags); 472 spin_lock_irqsave(&wl->wl_lock, flags);
481 ieee80211_wake_queues(wl->hw); 473 ieee80211_wake_queues(wl->hw);
482 wl->tx_queue_stopped = false; 474 wl->tx_queue_stopped = false;
483 spin_unlock_irqrestore(&wl->wl_lock, flags); 475 spin_unlock_irqrestore(&wl->wl_lock, flags);
484 ieee80211_queue_work(wl->hw, &wl->tx_work);
485
486 } 476 }
487 477
488 /* Every completed frame needs to be acknowledged */ 478 /* Every completed frame needs to be acknowledged */
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index c0ce2c8b43b8..bb23cd522b22 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -370,6 +370,8 @@ struct wl1251 {
370 /* in dBm */ 370 /* in dBm */
371 int power_level; 371 int power_level;
372 372
373 int rssi_thold;
374
373 struct wl1251_stats stats; 375 struct wl1251_stats stats;
374 struct wl1251_debugfs debugfs; 376 struct wl1251_debugfs debugfs;
375 377
@@ -410,6 +412,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
410 412
411#define WL1251_DEFAULT_CHANNEL 0 413#define WL1251_DEFAULT_CHANNEL 0
412 414
415#define WL1251_DEFAULT_BET_CONSECUTIVE 10
416
413#define CHIP_ID_1251_PG10 (0x7010101) 417#define CHIP_ID_1251_PG10 (0x7010101)
414#define CHIP_ID_1251_PG11 (0x7020101) 418#define CHIP_ID_1251_PG11 (0x7020101)
415#define CHIP_ID_1251_PG12 (0x7030101) 419#define CHIP_ID_1251_PG12 (0x7030101)
@@ -431,4 +435,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
431#define WL1251_PART_WORK_REG_START REGISTERS_BASE 435#define WL1251_PART_WORK_REG_START REGISTERS_BASE
432#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE 436#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE
433 437
438#define WL1251_DEFAULT_LOW_RSSI_WEIGHT 10
439#define WL1251_DEFAULT_LOW_RSSI_DEPTH 10
440
434#endif 441#endif
diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/wl1251/wl12xx_80211.h
index 184628027213..1417b1445c3d 100644
--- a/drivers/net/wireless/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/wl1251/wl12xx_80211.h
@@ -54,7 +54,6 @@
54 54
55/* This really should be 8, but not for our firmware */ 55/* This really should be 8, but not for our firmware */
56#define MAX_SUPPORTED_RATES 32 56#define MAX_SUPPORTED_RATES 32
57#define COUNTRY_STRING_LEN 3
58#define MAX_COUNTRY_TRIPLETS 32 57#define MAX_COUNTRY_TRIPLETS 32
59 58
60/* Headers */ 59/* Headers */
@@ -98,7 +97,7 @@ struct country_triplet {
98 97
99struct wl12xx_ie_country { 98struct wl12xx_ie_country {
100 struct wl12xx_ie_header header; 99 struct wl12xx_ie_header header;
101 u8 country_string[COUNTRY_STRING_LEN]; 100 u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
102 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; 101 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
103} __packed; 102} __packed;
104 103
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 0e65bce457d6..692ebff38fc8 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -54,7 +54,7 @@ config WL12XX_SDIO
54 54
55config WL12XX_SDIO_TEST 55config WL12XX_SDIO_TEST
56 tristate "TI wl12xx SDIO testing support" 56 tristate "TI wl12xx SDIO testing support"
57 depends on WL12XX && MMC 57 depends on WL12XX && MMC && WL12XX_SDIO
58 default n 58 default n
59 ---help--- 59 ---help---
60 This module adds support for the SDIO bus testing with the 60 This module adds support for the SDIO bus testing with the
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index cc4068d2b4a8..a3db755ceeda 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
751 return 0; 751 return 0;
752} 752}
753 753
754int wl1271_acx_rate_policies(struct wl1271 *wl) 754int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
755{ 755{
756 struct acx_rate_policy *acx; 756 struct acx_sta_rate_policy *acx;
757 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf; 757 struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
758 int idx = 0; 758 int idx = 0;
759 int ret = 0; 759 int ret = 0;
760 760
@@ -783,6 +783,10 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
783 783
784 acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT); 784 acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
785 785
786 wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
787 acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
788 acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
789
786 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 790 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
787 if (ret < 0) { 791 if (ret < 0) {
788 wl1271_warning("Setting of rate policies failed: %d", ret); 792 wl1271_warning("Setting of rate policies failed: %d", ret);
@@ -794,6 +798,38 @@ out:
794 return ret; 798 return ret;
795} 799}
796 800
801int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
802 u8 idx)
803{
804 struct acx_ap_rate_policy *acx;
805 int ret = 0;
806
807 wl1271_debug(DEBUG_ACX, "acx ap rate policy");
808
809 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
810 if (!acx) {
811 ret = -ENOMEM;
812 goto out;
813 }
814
815 acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
816 acx->rate_policy.short_retry_limit = c->short_retry_limit;
817 acx->rate_policy.long_retry_limit = c->long_retry_limit;
818 acx->rate_policy.aflags = c->aflags;
819
820 acx->rate_policy_idx = cpu_to_le32(idx);
821
822 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
823 if (ret < 0) {
824 wl1271_warning("Setting of ap rate policy failed: %d", ret);
825 goto out;
826 }
827
828out:
829 kfree(acx);
830 return ret;
831}
832
797int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, 833int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
798 u8 aifsn, u16 txop) 834 u8 aifsn, u16 txop)
799{ 835{
@@ -915,9 +951,9 @@ out:
915 return ret; 951 return ret;
916} 952}
917 953
918int wl1271_acx_mem_cfg(struct wl1271 *wl) 954int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
919{ 955{
920 struct wl1271_acx_config_memory *mem_conf; 956 struct wl1271_acx_ap_config_memory *mem_conf;
921 int ret; 957 int ret;
922 958
923 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); 959 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -929,10 +965,10 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
929 } 965 }
930 966
931 /* memory config */ 967 /* memory config */
932 mem_conf->num_stations = DEFAULT_NUM_STATIONS; 968 mem_conf->num_stations = wl->conf.mem.num_stations;
933 mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS; 969 mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
934 mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS; 970 mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
935 mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES; 971 mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
936 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); 972 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
937 973
938 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, 974 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
@@ -947,13 +983,45 @@ out:
947 return ret; 983 return ret;
948} 984}
949 985
950int wl1271_acx_init_mem_config(struct wl1271 *wl) 986int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
951{ 987{
988 struct wl1271_acx_sta_config_memory *mem_conf;
952 int ret; 989 int ret;
953 990
954 ret = wl1271_acx_mem_cfg(wl); 991 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
955 if (ret < 0) 992
956 return ret; 993 mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
994 if (!mem_conf) {
995 ret = -ENOMEM;
996 goto out;
997 }
998
999 /* memory config */
1000 mem_conf->num_stations = wl->conf.mem.num_stations;
1001 mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
1002 mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
1003 mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
1004 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
1005 mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory;
1006 mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks;
1007 mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks;
1008 mem_conf->tx_min = wl->conf.mem.tx_min;
1009
1010 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
1011 sizeof(*mem_conf));
1012 if (ret < 0) {
1013 wl1271_warning("wl1271 mem config failed: %d", ret);
1014 goto out;
1015 }
1016
1017out:
1018 kfree(mem_conf);
1019 return ret;
1020}
1021
1022int wl1271_acx_init_mem_config(struct wl1271 *wl)
1023{
1024 int ret;
957 1025
958 wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), 1026 wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
959 GFP_KERNEL); 1027 GFP_KERNEL);
@@ -1233,6 +1301,7 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1233 struct wl1271_acx_ht_capabilities *acx; 1301 struct wl1271_acx_ht_capabilities *acx;
1234 u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 1302 u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1235 int ret = 0; 1303 int ret = 0;
1304 u32 ht_capabilites = 0;
1236 1305
1237 wl1271_debug(DEBUG_ACX, "acx ht capabilities setting"); 1306 wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
1238 1307
@@ -1244,27 +1313,26 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1244 1313
1245 /* Allow HT Operation ? */ 1314 /* Allow HT Operation ? */
1246 if (allow_ht_operation) { 1315 if (allow_ht_operation) {
1247 acx->ht_capabilites = 1316 ht_capabilites =
1248 WL1271_ACX_FW_CAP_HT_OPERATION; 1317 WL1271_ACX_FW_CAP_HT_OPERATION;
1249 if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD) 1318 if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
1250 acx->ht_capabilites |= 1319 ht_capabilites |=
1251 WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT; 1320 WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
1252 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 1321 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
1253 acx->ht_capabilites |= 1322 ht_capabilites |=
1254 WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS; 1323 WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
1255 if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT) 1324 if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
1256 acx->ht_capabilites |= 1325 ht_capabilites |=
1257 WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION; 1326 WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
1258 1327
1259 /* get data from A-MPDU parameters field */ 1328 /* get data from A-MPDU parameters field */
1260 acx->ampdu_max_length = ht_cap->ampdu_factor; 1329 acx->ampdu_max_length = ht_cap->ampdu_factor;
1261 acx->ampdu_min_spacing = ht_cap->ampdu_density; 1330 acx->ampdu_min_spacing = ht_cap->ampdu_density;
1262
1263 memcpy(acx->mac_address, mac_address, ETH_ALEN);
1264 } else { /* HT operations are not allowed */
1265 acx->ht_capabilites = 0;
1266 } 1331 }
1267 1332
1333 memcpy(acx->mac_address, mac_address, ETH_ALEN);
1334 acx->ht_capabilites = cpu_to_le32(ht_capabilites);
1335
1268 ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx)); 1336 ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
1269 if (ret < 0) { 1337 if (ret < 0) {
1270 wl1271_warning("acx ht capabilities setting failed: %d", ret); 1338 wl1271_warning("acx ht capabilities setting failed: %d", ret);
@@ -1293,7 +1361,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
1293 acx->ht_protection = 1361 acx->ht_protection =
1294 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); 1362 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
1295 acx->rifs_mode = 0; 1363 acx->rifs_mode = 0;
1296 acx->gf_protection = 0; 1364 acx->gf_protection =
1365 !!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
1297 acx->ht_tx_burst_limit = 0; 1366 acx->ht_tx_burst_limit = 0;
1298 acx->dual_cts_protection = 0; 1367 acx->dual_cts_protection = 0;
1299 1368
@@ -1309,6 +1378,91 @@ out:
1309 return ret; 1378 return ret;
1310} 1379}
1311 1380
1381/* Configure BA session initiator/receiver parameters setting in the FW. */
1382int wl1271_acx_set_ba_session(struct wl1271 *wl,
1383 enum ieee80211_back_parties direction,
1384 u8 tid_index, u8 policy)
1385{
1386 struct wl1271_acx_ba_session_policy *acx;
1387 int ret;
1388
1389 wl1271_debug(DEBUG_ACX, "acx ba session setting");
1390
1391 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1392 if (!acx) {
1393 ret = -ENOMEM;
1394 goto out;
1395 }
1396
1397 /* ANY role */
1398 acx->role_id = 0xff;
1399 acx->tid = tid_index;
1400 acx->enable = policy;
1401 acx->ba_direction = direction;
1402
1403 switch (direction) {
1404 case WLAN_BACK_INITIATOR:
1405 acx->win_size = wl->conf.ht.tx_ba_win_size;
1406 acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
1407 break;
1408 case WLAN_BACK_RECIPIENT:
1409 acx->win_size = RX_BA_WIN_SIZE;
1410 acx->inactivity_timeout = 0;
1411 break;
1412 default:
1413 wl1271_error("Incorrect acx command id=%x\n", direction);
1414 ret = -EINVAL;
1415 goto out;
1416 }
1417
1418 ret = wl1271_cmd_configure(wl,
1419 ACX_BA_SESSION_POLICY_CFG,
1420 acx,
1421 sizeof(*acx));
1422 if (ret < 0) {
1423 wl1271_warning("acx ba session setting failed: %d", ret);
1424 goto out;
1425 }
1426
1427out:
1428 kfree(acx);
1429 return ret;
1430}
1431
1432/* setup BA session receiver setting in the FW. */
1433int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
1434 bool enable)
1435{
1436 struct wl1271_acx_ba_receiver_setup *acx;
1437 int ret;
1438
1439 wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
1440
1441 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1442 if (!acx) {
1443 ret = -ENOMEM;
1444 goto out;
1445 }
1446
1447 /* Single link for now */
1448 acx->link_id = 1;
1449 acx->tid = tid_index;
1450 acx->enable = enable;
1451 acx->win_size = 0;
1452 acx->ssn = ssn;
1453
1454 ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
1455 sizeof(*acx));
1456 if (ret < 0) {
1457 wl1271_warning("acx ba receiver session failed: %d", ret);
1458 goto out;
1459 }
1460
1461out:
1462 kfree(acx);
1463 return ret;
1464}
1465
1312int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime) 1466int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
1313{ 1467{
1314 struct wl1271_acx_fw_tsf_information *tsf_info; 1468 struct wl1271_acx_fw_tsf_information *tsf_info;
@@ -1334,3 +1488,82 @@ out:
1334 kfree(tsf_info); 1488 kfree(tsf_info);
1335 return ret; 1489 return ret;
1336} 1490}
1491
1492int wl1271_acx_max_tx_retry(struct wl1271 *wl)
1493{
1494 struct wl1271_acx_max_tx_retry *acx = NULL;
1495 int ret;
1496
1497 wl1271_debug(DEBUG_ACX, "acx max tx retry");
1498
1499 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1500 if (!acx)
1501 return -ENOMEM;
1502
1503 acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
1504
1505 ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
1506 if (ret < 0) {
1507 wl1271_warning("acx max tx retry failed: %d", ret);
1508 goto out;
1509 }
1510
1511out:
1512 kfree(acx);
1513 return ret;
1514}
1515
1516int wl1271_acx_config_ps(struct wl1271 *wl)
1517{
1518 struct wl1271_acx_config_ps *config_ps;
1519 int ret;
1520
1521 wl1271_debug(DEBUG_ACX, "acx config ps");
1522
1523 config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL);
1524 if (!config_ps) {
1525 ret = -ENOMEM;
1526 goto out;
1527 }
1528
1529 config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
1530 config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
1531 config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
1532
1533 ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
1534 sizeof(*config_ps));
1535
1536 if (ret < 0) {
1537 wl1271_warning("acx config ps failed: %d", ret);
1538 goto out;
1539 }
1540
1541out:
1542 kfree(config_ps);
1543 return ret;
1544}
1545
1546int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
1547{
1548 struct wl1271_acx_inconnection_sta *acx = NULL;
1549 int ret;
1550
1551 wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr);
1552
1553 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1554 if (!acx)
1555 return -ENOMEM;
1556
1557 memcpy(acx->addr, addr, ETH_ALEN);
1558
1559 ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
1560 acx, sizeof(*acx));
1561 if (ret < 0) {
1562 wl1271_warning("acx set inconnaction sta failed: %d", ret);
1563 goto out;
1564 }
1565
1566out:
1567 kfree(acx);
1568 return ret;
1569}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 7bd8e4db4a71..dd19b01d807b 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -133,7 +133,6 @@ enum {
133 133
134#define DEFAULT_UCAST_PRIORITY 0 134#define DEFAULT_UCAST_PRIORITY 0
135#define DEFAULT_RX_Q_PRIORITY 0 135#define DEFAULT_RX_Q_PRIORITY 0
136#define DEFAULT_NUM_STATIONS 1
137#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */ 136#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */
138#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */ 137#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */
139#define TRACE_BUFFER_MAX_SIZE 256 138#define TRACE_BUFFER_MAX_SIZE 256
@@ -747,13 +746,23 @@ struct acx_rate_class {
747#define ACX_TX_BASIC_RATE 0 746#define ACX_TX_BASIC_RATE 0
748#define ACX_TX_AP_FULL_RATE 1 747#define ACX_TX_AP_FULL_RATE 1
749#define ACX_TX_RATE_POLICY_CNT 2 748#define ACX_TX_RATE_POLICY_CNT 2
750struct acx_rate_policy { 749struct acx_sta_rate_policy {
751 struct acx_header header; 750 struct acx_header header;
752 751
753 __le32 rate_class_cnt; 752 __le32 rate_class_cnt;
754 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES]; 753 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
755} __packed; 754} __packed;
756 755
756
757#define ACX_TX_AP_MODE_MGMT_RATE 4
758#define ACX_TX_AP_MODE_BCST_RATE 5
759struct acx_ap_rate_policy {
760 struct acx_header header;
761
762 __le32 rate_policy_idx;
763 struct acx_rate_class rate_policy;
764} __packed;
765
757struct acx_ac_cfg { 766struct acx_ac_cfg {
758 struct acx_header header; 767 struct acx_header header;
759 u8 ac; 768 u8 ac;
@@ -787,12 +796,9 @@ struct acx_tx_config_options {
787 __le16 tx_compl_threshold; /* number of packets */ 796 __le16 tx_compl_threshold; /* number of packets */
788} __packed; 797} __packed;
789 798
790#define ACX_RX_MEM_BLOCKS 70
791#define ACX_TX_MIN_MEM_BLOCKS 40
792#define ACX_TX_DESCRIPTORS 32 799#define ACX_TX_DESCRIPTORS 32
793#define ACX_NUM_SSID_PROFILES 1
794 800
795struct wl1271_acx_config_memory { 801struct wl1271_acx_ap_config_memory {
796 struct acx_header header; 802 struct acx_header header;
797 803
798 u8 rx_mem_block_num; 804 u8 rx_mem_block_num;
@@ -802,6 +808,20 @@ struct wl1271_acx_config_memory {
802 __le32 total_tx_descriptors; 808 __le32 total_tx_descriptors;
803} __packed; 809} __packed;
804 810
811struct wl1271_acx_sta_config_memory {
812 struct acx_header header;
813
814 u8 rx_mem_block_num;
815 u8 tx_min_mem_block_num;
816 u8 num_stations;
817 u8 num_ssid_profiles;
818 __le32 total_tx_descriptors;
819 u8 dyn_mem_enable;
820 u8 tx_free_req;
821 u8 rx_free_req;
822 u8 tx_min;
823} __packed;
824
805struct wl1271_acx_mem_map { 825struct wl1271_acx_mem_map {
806 struct acx_header header; 826 struct acx_header header;
807 827
@@ -1051,6 +1071,59 @@ struct wl1271_acx_ht_information {
1051 u8 padding[3]; 1071 u8 padding[3];
1052} __packed; 1072} __packed;
1053 1073
1074#define RX_BA_WIN_SIZE 8
1075
1076struct wl1271_acx_ba_session_policy {
1077 struct acx_header header;
1078 /*
1079 * Specifies role Id, Range 0-7, 0xFF means ANY role.
1080 * Future use. For now this field is irrelevant
1081 */
1082 u8 role_id;
1083 /*
1084 * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id.
1085 * Not applicable if Role Id is set to ANY.
1086 */
1087 u8 link_id;
1088
1089 u8 tid;
1090
1091 u8 enable;
1092
1093 /* Windows size in number of packets */
1094 u16 win_size;
1095
1096 /*
1097 * As initiator inactivity timeout in time units(TU) of 1024us.
1098 * As receiver reserved
1099 */
1100 u16 inactivity_timeout;
1101
1102 /* Initiator = 1/Receiver = 0 */
1103 u8 ba_direction;
1104
1105 u8 padding[3];
1106} __packed;
1107
1108struct wl1271_acx_ba_receiver_setup {
1109 struct acx_header header;
1110
1111 /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */
1112 u8 link_id;
1113
1114 u8 tid;
1115
1116 u8 enable;
1117
1118 u8 padding[1];
1119
1120 /* Windows size in number of packets */
1121 u16 win_size;
1122
1123 /* BA session starting sequence number. RANGE 0-FFF */
1124 u16 ssn;
1125} __packed;
1126
1054struct wl1271_acx_fw_tsf_information { 1127struct wl1271_acx_fw_tsf_information {
1055 struct acx_header header; 1128 struct acx_header header;
1056 1129
@@ -1062,6 +1135,33 @@ struct wl1271_acx_fw_tsf_information {
1062 u8 padding[3]; 1135 u8 padding[3];
1063} __packed; 1136} __packed;
1064 1137
1138struct wl1271_acx_max_tx_retry {
1139 struct acx_header header;
1140
1141 /*
1142 * the number of frames transmission failures before
1143 * issuing the aging event.
1144 */
1145 __le16 max_tx_retry;
1146 u8 padding_1[2];
1147} __packed;
1148
1149struct wl1271_acx_config_ps {
1150 struct acx_header header;
1151
1152 u8 exit_retries;
1153 u8 enter_retries;
1154 u8 padding[2];
1155 __le32 null_data_rate;
1156} __packed;
1157
1158struct wl1271_acx_inconnection_sta {
1159 struct acx_header header;
1160
1161 u8 addr[ETH_ALEN];
1162 u8 padding1[2];
1163} __packed;
1164
1065enum { 1165enum {
1066 ACX_WAKE_UP_CONDITIONS = 0x0002, 1166 ACX_WAKE_UP_CONDITIONS = 0x0002,
1067 ACX_MEM_CFG = 0x0003, 1167 ACX_MEM_CFG = 0x0003,
@@ -1113,22 +1213,24 @@ enum {
1113 ACX_RSSI_SNR_WEIGHTS = 0x0052, 1213 ACX_RSSI_SNR_WEIGHTS = 0x0052,
1114 ACX_KEEP_ALIVE_MODE = 0x0053, 1214 ACX_KEEP_ALIVE_MODE = 0x0053,
1115 ACX_SET_KEEP_ALIVE_CONFIG = 0x0054, 1215 ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
1116 ACX_BA_SESSION_RESPONDER_POLICY = 0x0055, 1216 ACX_BA_SESSION_POLICY_CFG = 0x0055,
1117 ACX_BA_SESSION_INITIATOR_POLICY = 0x0056, 1217 ACX_BA_SESSION_RX_SETUP = 0x0056,
1118 ACX_PEER_HT_CAP = 0x0057, 1218 ACX_PEER_HT_CAP = 0x0057,
1119 ACX_HT_BSS_OPERATION = 0x0058, 1219 ACX_HT_BSS_OPERATION = 0x0058,
1120 ACX_COEX_ACTIVITY = 0x0059, 1220 ACX_COEX_ACTIVITY = 0x0059,
1121 ACX_SET_DCO_ITRIM_PARAMS = 0x0061, 1221 ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
1222 ACX_GEN_FW_CMD = 0x0070,
1223 ACX_HOST_IF_CFG_BITMAP = 0x0071,
1224 ACX_MAX_TX_FAILURE = 0x0072,
1225 ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073,
1122 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1226 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1123 DOT11_CUR_TX_PWR = 0x100D, 1227 DOT11_CUR_TX_PWR = 0x100D,
1124 DOT11_RX_DOT11_MODE = 0x1012, 1228 DOT11_RX_DOT11_MODE = 0x1012,
1125 DOT11_RTS_THRESHOLD = 0x1013, 1229 DOT11_RTS_THRESHOLD = 0x1013,
1126 DOT11_GROUP_ADDRESS_TBL = 0x1014, 1230 DOT11_GROUP_ADDRESS_TBL = 0x1014,
1127 ACX_PM_CONFIG = 0x1016, 1231 ACX_PM_CONFIG = 0x1016,
1128 1232 ACX_CONFIG_PS = 0x1017,
1129 MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, 1233 ACX_CONFIG_HANGOVER = 0x1018,
1130
1131 MAX_IE = 0xFFFF
1132}; 1234};
1133 1235
1134 1236
@@ -1160,7 +1262,9 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
1160int wl1271_acx_cts_protect(struct wl1271 *wl, 1262int wl1271_acx_cts_protect(struct wl1271 *wl,
1161 enum acx_ctsprotect_type ctsprotect); 1263 enum acx_ctsprotect_type ctsprotect);
1162int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1264int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1163int wl1271_acx_rate_policies(struct wl1271 *wl); 1265int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
1266int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
1267 u8 idx);
1164int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, 1268int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
1165 u8 aifsn, u16 txop); 1269 u8 aifsn, u16 txop);
1166int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, 1270int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
@@ -1168,7 +1272,8 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
1168 u32 apsd_conf0, u32 apsd_conf1); 1272 u32 apsd_conf0, u32 apsd_conf1);
1169int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold); 1273int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
1170int wl1271_acx_tx_config_options(struct wl1271 *wl); 1274int wl1271_acx_tx_config_options(struct wl1271 *wl);
1171int wl1271_acx_mem_cfg(struct wl1271 *wl); 1275int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
1276int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
1172int wl1271_acx_init_mem_config(struct wl1271 *wl); 1277int wl1271_acx_init_mem_config(struct wl1271 *wl);
1173int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1278int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1174int wl1271_acx_smart_reflex(struct wl1271 *wl); 1279int wl1271_acx_smart_reflex(struct wl1271 *wl);
@@ -1185,6 +1290,14 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1185 bool allow_ht_operation); 1290 bool allow_ht_operation);
1186int wl1271_acx_set_ht_information(struct wl1271 *wl, 1291int wl1271_acx_set_ht_information(struct wl1271 *wl,
1187 u16 ht_operation_mode); 1292 u16 ht_operation_mode);
1293int wl1271_acx_set_ba_session(struct wl1271 *wl,
1294 enum ieee80211_back_parties direction,
1295 u8 tid_index, u8 policy);
1296int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
1297 bool enable);
1188int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); 1298int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1299int wl1271_acx_max_tx_retry(struct wl1271 *wl);
1300int wl1271_acx_config_ps(struct wl1271 *wl);
1301int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
1189 1302
1190#endif /* __WL1271_ACX_H__ */ 1303#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 4df04f84d7f1..6934dffd5174 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -28,6 +28,7 @@
28#include "boot.h" 28#include "boot.h"
29#include "io.h" 29#include "io.h"
30#include "event.h" 30#include "event.h"
31#include "rx.h"
31 32
32static struct wl1271_partition_set part_table[PART_TABLE_LEN] = { 33static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
33 [PART_DOWN] = { 34 [PART_DOWN] = {
@@ -100,6 +101,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
100 wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); 101 wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
101} 102}
102 103
104static void wl1271_parse_fw_ver(struct wl1271 *wl)
105{
106 int ret;
107
108 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
109 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
110 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
111 &wl->chip.fw_ver[4]);
112
113 if (ret != 5) {
114 wl1271_warning("fw version incorrect value");
115 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
116 return;
117 }
118}
119
103static void wl1271_boot_fw_version(struct wl1271 *wl) 120static void wl1271_boot_fw_version(struct wl1271 *wl)
104{ 121{
105 struct wl1271_static_data static_data; 122 struct wl1271_static_data static_data;
@@ -107,11 +124,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
107 wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data), 124 wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
108 false); 125 false);
109 126
110 strncpy(wl->chip.fw_ver, static_data.fw_version, 127 strncpy(wl->chip.fw_ver_str, static_data.fw_version,
111 sizeof(wl->chip.fw_ver)); 128 sizeof(wl->chip.fw_ver_str));
112 129
113 /* make sure the string is NULL-terminated */ 130 /* make sure the string is NULL-terminated */
114 wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0'; 131 wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
132
133 wl1271_parse_fw_ver(wl);
115} 134}
116 135
117static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, 136static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -231,7 +250,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
231 */ 250 */
232 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) || 251 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
233 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) { 252 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
234 if (wl->nvs->general_params.dual_mode_select) 253 /* for now 11a is unsupported in AP mode */
254 if (wl->bss_type != BSS_TYPE_AP_BSS &&
255 wl->nvs->general_params.dual_mode_select)
235 wl->enable_11a = true; 256 wl->enable_11a = true;
236 } 257 }
237 258
@@ -431,6 +452,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
431 PSPOLL_DELIVERY_FAILURE_EVENT_ID | 452 PSPOLL_DELIVERY_FAILURE_EVENT_ID |
432 SOFT_GEMINI_SENSE_EVENT_ID; 453 SOFT_GEMINI_SENSE_EVENT_ID;
433 454
455 if (wl->bss_type == BSS_TYPE_AP_BSS)
456 wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
457
434 ret = wl1271_event_unmask(wl); 458 ret = wl1271_event_unmask(wl);
435 if (ret < 0) { 459 if (ret < 0) {
436 wl1271_error("EVENT mask setting failed"); 460 wl1271_error("EVENT mask setting failed");
@@ -464,6 +488,9 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
464 fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET; 488 fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
465 489
466 wl->hw_pg_ver = (s8)fuse; 490 wl->hw_pg_ver = (s8)fuse;
491
492 if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
493 wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
467} 494}
468 495
469/* uploads NVS and firmware */ 496/* uploads NVS and firmware */
@@ -595,8 +622,7 @@ int wl1271_boot(struct wl1271 *wl)
595 wl1271_boot_enable_interrupts(wl); 622 wl1271_boot_enable_interrupts(wl);
596 623
597 /* set the wl1271 default filters */ 624 /* set the wl1271 default filters */
598 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 625 wl1271_set_default_filters(wl);
599 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
600 626
601 wl1271_event_mbox_config(wl); 627 wl1271_event_mbox_config(wl);
602 628
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
index d67dcffa31eb..17229b86fc71 100644
--- a/drivers/net/wireless/wl12xx/boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -59,6 +59,11 @@ struct wl1271_static_data {
59#define PG_VER_MASK 0x3c 59#define PG_VER_MASK 0x3c
60#define PG_VER_OFFSET 2 60#define PG_VER_OFFSET 2
61 61
62#define PG_MAJOR_VER_MASK 0x3
63#define PG_MAJOR_VER_OFFSET 0x0
64#define PG_MINOR_VER_MASK 0xc
65#define PG_MINOR_VER_OFFSET 0x2
66
62#define CMD_MBOX_ADDRESS 0x407B4 67#define CMD_MBOX_ADDRESS 0x407B4
63 68
64#define POLARITY_LOW BIT(1) 69#define POLARITY_LOW BIT(1)
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 0106628aa5a2..f0aa7ab97bf7 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -36,6 +36,7 @@
36#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
37#include "cmd.h" 37#include "cmd.h"
38#include "event.h" 38#include "event.h"
39#include "tx.h"
39 40
40#define WL1271_CMD_FAST_POLL_COUNT 50 41#define WL1271_CMD_FAST_POLL_COUNT 50
41 42
@@ -62,6 +63,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
62 cmd->status = 0; 63 cmd->status = 0;
63 64
64 WARN_ON(len % 4 != 0); 65 WARN_ON(len % 4 != 0);
66 WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
65 67
66 wl1271_write(wl, wl->cmd_box_addr, buf, len, false); 68 wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
67 69
@@ -221,7 +223,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
221 * Poll the mailbox event field until any of the bits in the mask is set or a 223 * Poll the mailbox event field until any of the bits in the mask is set or a
222 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) 224 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
223 */ 225 */
224static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) 226static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
225{ 227{
226 u32 events_vector, event; 228 u32 events_vector, event;
227 unsigned long timeout; 229 unsigned long timeout;
@@ -230,7 +232,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
230 232
231 do { 233 do {
232 if (time_after(jiffies, timeout)) { 234 if (time_after(jiffies, timeout)) {
233 ieee80211_queue_work(wl->hw, &wl->recovery_work); 235 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
236 (int)mask);
234 return -ETIMEDOUT; 237 return -ETIMEDOUT;
235 } 238 }
236 239
@@ -248,6 +251,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
248 return 0; 251 return 0;
249} 252}
250 253
254static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
255{
256 int ret;
257
258 ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
259 if (ret != 0) {
260 ieee80211_queue_work(wl->hw, &wl->recovery_work);
261 return ret;
262 }
263
264 return 0;
265}
266
251int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) 267int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
252{ 268{
253 struct wl1271_cmd_join *join; 269 struct wl1271_cmd_join *join;
@@ -271,6 +287,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
271 join->rx_filter_options = cpu_to_le32(wl->rx_filter); 287 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
272 join->bss_type = bss_type; 288 join->bss_type = bss_type;
273 join->basic_rate_set = cpu_to_le32(wl->basic_rate_set); 289 join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
290 join->supported_rate_set = cpu_to_le32(wl->rate_set);
274 291
275 if (wl->band == IEEE80211_BAND_5GHZ) 292 if (wl->band == IEEE80211_BAND_5GHZ)
276 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ; 293 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
@@ -288,6 +305,9 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
288 wl->tx_security_last_seq = 0; 305 wl->tx_security_last_seq = 0;
289 wl->tx_security_seq = 0; 306 wl->tx_security_seq = 0;
290 307
308 wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
309 join->basic_rate_set, join->supported_rate_set);
310
291 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0); 311 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
292 if (ret < 0) { 312 if (ret < 0) {
293 wl1271_error("failed to initiate cmd join"); 313 wl1271_error("failed to initiate cmd join");
@@ -439,7 +459,7 @@ out:
439 return ret; 459 return ret;
440} 460}
441 461
442int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send) 462int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
443{ 463{
444 struct wl1271_cmd_ps_params *ps_params = NULL; 464 struct wl1271_cmd_ps_params *ps_params = NULL;
445 int ret = 0; 465 int ret = 0;
@@ -453,10 +473,6 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
453 } 473 }
454 474
455 ps_params->ps_mode = ps_mode; 475 ps_params->ps_mode = ps_mode;
456 ps_params->send_null_data = send;
457 ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries;
458 ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period;
459 ps_params->null_data_rate = cpu_to_le32(rates);
460 476
461 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 477 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
462 sizeof(*ps_params), 0); 478 sizeof(*ps_params), 0);
@@ -490,8 +506,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
490 cmd->len = cpu_to_le16(buf_len); 506 cmd->len = cpu_to_le16(buf_len);
491 cmd->template_type = template_id; 507 cmd->template_type = template_id;
492 cmd->enabled_rates = cpu_to_le32(rates); 508 cmd->enabled_rates = cpu_to_le32(rates);
493 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit; 509 cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
494 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit; 510 cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
495 cmd->index = index; 511 cmd->index = index;
496 512
497 if (buf) 513 if (buf)
@@ -659,15 +675,15 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
659 675
660 /* llc layer */ 676 /* llc layer */
661 memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 677 memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
662 tmpl.llc_type = htons(ETH_P_ARP); 678 tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
663 679
664 /* arp header */ 680 /* arp header */
665 arp_hdr = &tmpl.arp_hdr; 681 arp_hdr = &tmpl.arp_hdr;
666 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 682 arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
667 arp_hdr->ar_pro = htons(ETH_P_IP); 683 arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
668 arp_hdr->ar_hln = ETH_ALEN; 684 arp_hdr->ar_hln = ETH_ALEN;
669 arp_hdr->ar_pln = 4; 685 arp_hdr->ar_pln = 4;
670 arp_hdr->ar_op = htons(ARPOP_REPLY); 686 arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
671 687
672 /* arp payload */ 688 /* arp payload */
673 memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN); 689 memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
@@ -702,9 +718,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
702 wl->basic_rate); 718 wl->basic_rate);
703} 719}
704 720
705int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) 721int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
706{ 722{
707 struct wl1271_cmd_set_keys *cmd; 723 struct wl1271_cmd_set_sta_keys *cmd;
708 int ret = 0; 724 int ret = 0;
709 725
710 wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id); 726 wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -731,11 +747,42 @@ out:
731 return ret; 747 return ret;
732} 748}
733 749
734int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 750int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
751{
752 struct wl1271_cmd_set_ap_keys *cmd;
753 int ret = 0;
754
755 wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
756
757 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
758 if (!cmd) {
759 ret = -ENOMEM;
760 goto out;
761 }
762
763 cmd->hlid = WL1271_AP_BROADCAST_HLID;
764 cmd->key_id = id;
765 cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
766 cmd->key_action = cpu_to_le16(KEY_SET_ID);
767 cmd->key_type = KEY_WEP;
768
769 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
770 if (ret < 0) {
771 wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
772 goto out;
773 }
774
775out:
776 kfree(cmd);
777
778 return ret;
779}
780
781int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
735 u8 key_size, const u8 *key, const u8 *addr, 782 u8 key_size, const u8 *key, const u8 *addr,
736 u32 tx_seq_32, u16 tx_seq_16) 783 u32 tx_seq_32, u16 tx_seq_16)
737{ 784{
738 struct wl1271_cmd_set_keys *cmd; 785 struct wl1271_cmd_set_sta_keys *cmd;
739 int ret = 0; 786 int ret = 0;
740 787
741 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 788 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -788,6 +835,67 @@ out:
788 return ret; 835 return ret;
789} 836}
790 837
838int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
839 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
840 u16 tx_seq_16)
841{
842 struct wl1271_cmd_set_ap_keys *cmd;
843 int ret = 0;
844 u8 lid_type;
845
846 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
847 if (!cmd)
848 return -ENOMEM;
849
850 if (hlid == WL1271_AP_BROADCAST_HLID) {
851 if (key_type == KEY_WEP)
852 lid_type = WEP_DEFAULT_LID_TYPE;
853 else
854 lid_type = BROADCAST_LID_TYPE;
855 } else {
856 lid_type = UNICAST_LID_TYPE;
857 }
858
859 wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
860 " hlid: %d", (int)action, (int)id, (int)lid_type,
861 (int)key_type, (int)hlid);
862
863 cmd->lid_key_type = lid_type;
864 cmd->hlid = hlid;
865 cmd->key_action = cpu_to_le16(action);
866 cmd->key_size = key_size;
867 cmd->key_type = key_type;
868 cmd->key_id = id;
869 cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
870 cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
871
872 if (key_type == KEY_TKIP) {
873 /*
874 * We get the key in the following form:
875 * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
876 * but the target is expecting:
877 * TKIP - RX MIC - TX MIC
878 */
879 memcpy(cmd->key, key, 16);
880 memcpy(cmd->key + 16, key + 24, 8);
881 memcpy(cmd->key + 24, key + 16, 8);
882 } else {
883 memcpy(cmd->key, key, key_size);
884 }
885
886 wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
887
888 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
889 if (ret < 0) {
890 wl1271_warning("could not set ap keys");
891 goto out;
892 }
893
894out:
895 kfree(cmd);
896 return ret;
897}
898
791int wl1271_cmd_disconnect(struct wl1271 *wl) 899int wl1271_cmd_disconnect(struct wl1271 *wl)
792{ 900{
793 struct wl1271_cmd_disconnect *cmd; 901 struct wl1271_cmd_disconnect *cmd;
@@ -850,3 +958,180 @@ out_free:
850out: 958out:
851 return ret; 959 return ret;
852} 960}
961
962int wl1271_cmd_start_bss(struct wl1271 *wl)
963{
964 struct wl1271_cmd_bss_start *cmd;
965 struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
966 int ret;
967
968 wl1271_debug(DEBUG_CMD, "cmd start bss");
969
970 /*
971 * FIXME: We currently do not support hidden SSID. The real SSID
972 * should be fetched from mac80211 first.
973 */
974 if (wl->ssid_len == 0) {
975 wl1271_warning("Hidden SSID currently not supported for AP");
976 ret = -EINVAL;
977 goto out;
978 }
979
980 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
981 if (!cmd) {
982 ret = -ENOMEM;
983 goto out;
984 }
985
986 memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
987
988 cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
989 cmd->bss_index = WL1271_AP_BSS_INDEX;
990 cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
991 cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
992 cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
993 cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
994 cmd->dtim_interval = bss_conf->dtim_period;
995 cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
996 cmd->channel = wl->channel;
997 cmd->ssid_len = wl->ssid_len;
998 cmd->ssid_type = SSID_TYPE_PUBLIC;
999 memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
1000
1001 switch (wl->band) {
1002 case IEEE80211_BAND_2GHZ:
1003 cmd->band = RADIO_BAND_2_4GHZ;
1004 break;
1005 case IEEE80211_BAND_5GHZ:
1006 cmd->band = RADIO_BAND_5GHZ;
1007 break;
1008 default:
1009 wl1271_warning("bss start - unknown band: %d", (int)wl->band);
1010 cmd->band = RADIO_BAND_2_4GHZ;
1011 break;
1012 }
1013
1014 ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
1015 if (ret < 0) {
1016 wl1271_error("failed to initiate cmd start bss");
1017 goto out_free;
1018 }
1019
1020out_free:
1021 kfree(cmd);
1022
1023out:
1024 return ret;
1025}
1026
1027int wl1271_cmd_stop_bss(struct wl1271 *wl)
1028{
1029 struct wl1271_cmd_bss_start *cmd;
1030 int ret;
1031
1032 wl1271_debug(DEBUG_CMD, "cmd stop bss");
1033
1034 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1035 if (!cmd) {
1036 ret = -ENOMEM;
1037 goto out;
1038 }
1039
1040 cmd->bss_index = WL1271_AP_BSS_INDEX;
1041
1042 ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
1043 if (ret < 0) {
1044 wl1271_error("failed to initiate cmd stop bss");
1045 goto out_free;
1046 }
1047
1048out_free:
1049 kfree(cmd);
1050
1051out:
1052 return ret;
1053}
1054
1055int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
1056{
1057 struct wl1271_cmd_add_sta *cmd;
1058 int ret;
1059
1060 wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
1061
1062 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1063 if (!cmd) {
1064 ret = -ENOMEM;
1065 goto out;
1066 }
1067
1068 /* currently we don't support UAPSD */
1069 cmd->sp_len = 0;
1070
1071 memcpy(cmd->addr, sta->addr, ETH_ALEN);
1072 cmd->bss_index = WL1271_AP_BSS_INDEX;
1073 cmd->aid = sta->aid;
1074 cmd->hlid = hlid;
1075
1076 /*
1077 * FIXME: Does STA support QOS? We need to propagate this info from
1078 * hostapd. Currently not that important since this is only used for
1079 * sending the correct flavor of null-data packet in response to a
1080 * trigger.
1081 */
1082 cmd->wmm = 0;
1083
1084 cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
1085 sta->supp_rates[wl->band]));
1086
1087 wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
1088
1089 ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
1090 if (ret < 0) {
1091 wl1271_error("failed to initiate cmd add sta");
1092 goto out_free;
1093 }
1094
1095out_free:
1096 kfree(cmd);
1097
1098out:
1099 return ret;
1100}
1101
1102int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
1103{
1104 struct wl1271_cmd_remove_sta *cmd;
1105 int ret;
1106
1107 wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
1108
1109 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1110 if (!cmd) {
1111 ret = -ENOMEM;
1112 goto out;
1113 }
1114
1115 cmd->hlid = hlid;
1116 /* We never send a deauth, mac80211 is in charge of this */
1117 cmd->reason_opcode = 0;
1118 cmd->send_deauth_flag = 0;
1119
1120 ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
1121 if (ret < 0) {
1122 wl1271_error("failed to initiate cmd remove sta");
1123 goto out_free;
1124 }
1125
1126 /*
1127 * We are ok with a timeout here. The event is sometimes not sent
1128 * due to a firmware bug.
1129 */
1130 wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
1131
1132out_free:
1133 kfree(cmd);
1134
1135out:
1136 return ret;
1137}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 2a1d9db7ceb8..54c12e71417e 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -39,7 +39,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
39int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 39int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
40int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 40int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
41int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); 41int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
42int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send); 42int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
43int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 43int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
44 size_t len); 44 size_t len);
45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
@@ -54,12 +54,20 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
54int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr); 54int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
55int wl1271_build_qos_null_data(struct wl1271 *wl); 55int wl1271_build_qos_null_data(struct wl1271 *wl);
56int wl1271_cmd_build_klv_null_data(struct wl1271 *wl); 56int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
57int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); 57int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
58int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 58int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
59 u8 key_size, const u8 *key, const u8 *addr, 59int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
60 u32 tx_seq_32, u16 tx_seq_16); 60 u8 key_size, const u8 *key, const u8 *addr,
61 u32 tx_seq_32, u16 tx_seq_16);
62int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
63 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
64 u16 tx_seq_16);
61int wl1271_cmd_disconnect(struct wl1271 *wl); 65int wl1271_cmd_disconnect(struct wl1271 *wl);
62int wl1271_cmd_set_sta_state(struct wl1271 *wl); 66int wl1271_cmd_set_sta_state(struct wl1271 *wl);
67int wl1271_cmd_start_bss(struct wl1271 *wl);
68int wl1271_cmd_stop_bss(struct wl1271 *wl);
69int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
70int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
63 71
64enum wl1271_commands { 72enum wl1271_commands {
65 CMD_INTERROGATE = 1, /*use this to read information elements*/ 73 CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -98,6 +106,12 @@ enum wl1271_commands {
98 CMD_STOP_PERIODIC_SCAN = 51, 106 CMD_STOP_PERIODIC_SCAN = 51,
99 CMD_SET_STA_STATE = 52, 107 CMD_SET_STA_STATE = 52,
100 108
109 /* AP mode commands */
110 CMD_BSS_START = 60,
111 CMD_BSS_STOP = 61,
112 CMD_ADD_STA = 62,
113 CMD_REMOVE_STA = 63,
114
101 NUM_COMMANDS, 115 NUM_COMMANDS,
102 MAX_COMMAND_ID = 0xFFFF, 116 MAX_COMMAND_ID = 0xFFFF,
103}; 117};
@@ -126,6 +140,14 @@ enum cmd_templ {
126 * For CTS-to-self (FastCTS) mechanism 140 * For CTS-to-self (FastCTS) mechanism
127 * for BT/WLAN coexistence (SoftGemini). */ 141 * for BT/WLAN coexistence (SoftGemini). */
128 CMD_TEMPL_ARP_RSP, 142 CMD_TEMPL_ARP_RSP,
143 CMD_TEMPL_LINK_MEASUREMENT_REPORT,
144
145 /* AP-mode specific */
146 CMD_TEMPL_AP_BEACON = 13,
147 CMD_TEMPL_AP_PROBE_RESPONSE,
148 CMD_TEMPL_AP_ARP_RSP,
149 CMD_TEMPL_DEAUTH_AP,
150
129 CMD_TEMPL_MAX = 0xff 151 CMD_TEMPL_MAX = 0xff
130}; 152};
131 153
@@ -195,6 +217,7 @@ struct wl1271_cmd_join {
195 * ACK or CTS frames). 217 * ACK or CTS frames).
196 */ 218 */
197 __le32 basic_rate_set; 219 __le32 basic_rate_set;
220 __le32 supported_rate_set;
198 u8 dtim_interval; 221 u8 dtim_interval;
199 /* 222 /*
200 * bits 0-2: This bitwise field specifies the type 223 * bits 0-2: This bitwise field specifies the type
@@ -257,20 +280,11 @@ struct wl1271_cmd_ps_params {
257 struct wl1271_cmd_header header; 280 struct wl1271_cmd_header header;
258 281
259 u8 ps_mode; /* STATION_* */ 282 u8 ps_mode; /* STATION_* */
260 u8 send_null_data; /* Do we have to send NULL data packet ? */ 283 u8 padding[3];
261 u8 retries; /* Number of retires for the initial NULL data packet */
262
263 /*
264 * TUs during which the target stays awake after switching
265 * to power save mode.
266 */
267 u8 hang_over_period;
268 __le32 null_data_rate;
269} __packed; 284} __packed;
270 285
271/* HW encryption keys */ 286/* HW encryption keys */
272#define NUM_ACCESS_CATEGORIES_COPY 4 287#define NUM_ACCESS_CATEGORIES_COPY 4
273#define MAX_KEY_SIZE 32
274 288
275enum wl1271_cmd_key_action { 289enum wl1271_cmd_key_action {
276 KEY_ADD_OR_REPLACE = 1, 290 KEY_ADD_OR_REPLACE = 1,
@@ -289,7 +303,7 @@ enum wl1271_cmd_key_type {
289 303
290/* FIXME: Add description for key-types */ 304/* FIXME: Add description for key-types */
291 305
292struct wl1271_cmd_set_keys { 306struct wl1271_cmd_set_sta_keys {
293 struct wl1271_cmd_header header; 307 struct wl1271_cmd_header header;
294 308
295 /* Ignored for default WEP key */ 309 /* Ignored for default WEP key */
@@ -318,6 +332,57 @@ struct wl1271_cmd_set_keys {
318 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 332 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
319} __packed; 333} __packed;
320 334
335enum wl1271_cmd_lid_key_type {
336 UNICAST_LID_TYPE = 0,
337 BROADCAST_LID_TYPE = 1,
338 WEP_DEFAULT_LID_TYPE = 2
339};
340
341struct wl1271_cmd_set_ap_keys {
342 struct wl1271_cmd_header header;
343
344 /*
345 * Indicates whether the HLID is a unicast key set
346 * or broadcast key set. A special value 0xFF is
347 * used to indicate that the HLID is on WEP-default
348 * (multi-hlids). of type wl1271_cmd_lid_key_type.
349 */
350 u8 hlid;
351
352 /*
353 * In WEP-default network (hlid == 0xFF) used to
354 * indicate which network STA/IBSS/AP role should be
355 * changed
356 */
357 u8 lid_key_type;
358
359 /*
360 * Key ID - For TKIP and AES key types, this field
361 * indicates the value that should be inserted into
362 * the KeyID field of frames transmitted using this
363 * key entry. For broadcast keys the index use as a
364 * marker for TX/RX key.
365 * For WEP default network (HLID=0xFF), this field
366 * indicates the ID of the key to add or remove.
367 */
368 u8 key_id;
369 u8 reserved_1;
370
371 /* key_action_e */
372 __le16 key_action;
373
374 /* key size in bytes */
375 u8 key_size;
376
377 /* key_type_e */
378 u8 key_type;
379
380 /* This field holds the security key data to add to the STA table */
381 u8 key[MAX_KEY_SIZE];
382 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
383 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
384} __packed;
385
321struct wl1271_cmd_test_header { 386struct wl1271_cmd_test_header {
322 u8 id; 387 u8 id;
323 u8 padding[3]; 388 u8 padding[3];
@@ -412,4 +477,68 @@ struct wl1271_cmd_set_sta_state {
412 u8 padding[3]; 477 u8 padding[3];
413} __packed; 478} __packed;
414 479
480enum wl1271_ssid_type {
481 SSID_TYPE_PUBLIC = 0,
482 SSID_TYPE_HIDDEN = 1
483};
484
485struct wl1271_cmd_bss_start {
486 struct wl1271_cmd_header header;
487
488 /* wl1271_ssid_type */
489 u8 ssid_type;
490 u8 ssid_len;
491 u8 ssid[IW_ESSID_MAX_SIZE];
492 u8 padding_1[2];
493
494 /* Basic rate set */
495 __le32 basic_rate_set;
496 /* Aging period in seconds*/
497 __le16 aging_period;
498
499 /*
500 * This field specifies the time between target beacon
501 * transmission times (TBTTs), in time units (TUs).
502 * Valid values are 1 to 1024.
503 */
504 __le16 beacon_interval;
505 u8 bssid[ETH_ALEN];
506 u8 bss_index;
507 /* Radio band */
508 u8 band;
509 u8 channel;
510 /* The host link id for the AP's global queue */
511 u8 global_hlid;
512 /* The host link id for the AP's broadcast queue */
513 u8 broadcast_hlid;
514 /* DTIM count */
515 u8 dtim_interval;
516 /* Beacon expiry time in ms */
517 u8 beacon_expiry;
518 u8 padding_2[3];
519} __packed;
520
521struct wl1271_cmd_add_sta {
522 struct wl1271_cmd_header header;
523
524 u8 addr[ETH_ALEN];
525 u8 hlid;
526 u8 aid;
527 u8 psd_type[NUM_ACCESS_CATEGORIES_COPY];
528 __le32 supported_rates;
529 u8 bss_index;
530 u8 sp_len;
531 u8 wmm;
532 u8 padding1;
533} __packed;
534
535struct wl1271_cmd_remove_sta {
536 struct wl1271_cmd_header header;
537
538 u8 hlid;
539 u8 reason_opcode;
540 u8 send_deauth_flag;
541 u8 padding1;
542} __packed;
543
415#endif /* __WL1271_CMD_H__ */ 544#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index a16b3616e430..856a8a2fff4f 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -496,6 +496,26 @@ struct conf_rx_settings {
496 CONF_HW_BIT_RATE_2MBPS) 496 CONF_HW_BIT_RATE_2MBPS)
497#define CONF_TX_RATE_RETRY_LIMIT 10 497#define CONF_TX_RATE_RETRY_LIMIT 10
498 498
499/*
500 * Rates supported for data packets when operating as AP. Note the absense
501 * of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop
502 * one. The rate dropped is not mandatory under any operating mode.
503 */
504#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
505 CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
506 CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \
507 CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \
508 CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS | \
509 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
510 CONF_HW_BIT_RATE_54MBPS)
511
512/*
513 * Default rates for management traffic when operating in AP mode. This
514 * should be configured according to the basic rate set of the AP
515 */
516#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
517 CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
518
499struct conf_tx_rate_class { 519struct conf_tx_rate_class {
500 520
501 /* 521 /*
@@ -636,9 +656,9 @@ struct conf_tx_settings {
636 656
637 /* 657 /*
638 * Configuration for rate classes for TX (currently only one 658 * Configuration for rate classes for TX (currently only one
639 * rate class supported.) 659 * rate class supported). Used in non-AP mode.
640 */ 660 */
641 struct conf_tx_rate_class rc_conf; 661 struct conf_tx_rate_class sta_rc_conf;
642 662
643 /* 663 /*
644 * Configuration for access categories for TX rate control. 664 * Configuration for access categories for TX rate control.
@@ -647,6 +667,28 @@ struct conf_tx_settings {
647 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT]; 667 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
648 668
649 /* 669 /*
670 * Configuration for rate classes in AP-mode. These rate classes
671 * are for the AC TX queues
672 */
673 struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
674
675 /*
676 * Management TX rate class for AP-mode.
677 */
678 struct conf_tx_rate_class ap_mgmt_conf;
679
680 /*
681 * Broadcast TX rate class for AP-mode.
682 */
683 struct conf_tx_rate_class ap_bcst_conf;
684
685 /*
686 * AP-mode - allow this number of TX retries to a station before an
687 * event is triggered from FW.
688 */
689 u16 ap_max_tx_retries;
690
691 /*
650 * Configuration for TID parameters. 692 * Configuration for TID parameters.
651 */ 693 */
652 u8 tid_conf_count; 694 u8 tid_conf_count;
@@ -687,6 +729,12 @@ struct conf_tx_settings {
687 * Range: CONF_HW_BIT_RATE_* bit mask 729 * Range: CONF_HW_BIT_RATE_* bit mask
688 */ 730 */
689 u32 basic_rate_5; 731 u32 basic_rate_5;
732
733 /*
734 * TX retry limits for templates
735 */
736 u8 tmpl_short_retry_limit;
737 u8 tmpl_long_retry_limit;
690}; 738};
691 739
692enum { 740enum {
@@ -912,6 +960,14 @@ struct conf_conn_settings {
912 u8 psm_entry_retries; 960 u8 psm_entry_retries;
913 961
914 /* 962 /*
963 * Specifies the maximum number of times to try PSM exit if it fails
964 * (if sending the appropriate null-func message fails.)
965 *
966 * Range 0 - 255
967 */
968 u8 psm_exit_retries;
969
970 /*
915 * Specifies the maximum number of times to try transmit the PSM entry 971 * Specifies the maximum number of times to try transmit the PSM entry
916 * null-func frame for each PSM entry attempt 972 * null-func frame for each PSM entry attempt
917 * 973 *
@@ -1036,30 +1092,30 @@ struct conf_scan_settings {
1036 /* 1092 /*
1037 * The minimum time to wait on each channel for active scans 1093 * The minimum time to wait on each channel for active scans
1038 * 1094 *
1039 * Range: 0 - 65536 tu 1095 * Range: u32 tu/1000
1040 */ 1096 */
1041 u16 min_dwell_time_active; 1097 u32 min_dwell_time_active;
1042 1098
1043 /* 1099 /*
1044 * The maximum time to wait on each channel for active scans 1100 * The maximum time to wait on each channel for active scans
1045 * 1101 *
1046 * Range: 0 - 65536 tu 1102 * Range: u32 tu/1000
1047 */ 1103 */
1048 u16 max_dwell_time_active; 1104 u32 max_dwell_time_active;
1049 1105
1050 /* 1106 /*
1051 * The maximum time to wait on each channel for passive scans 1107 * The minimum time to wait on each channel for passive scans
1052 * 1108 *
1053 * Range: 0 - 65536 tu 1109 * Range: u32 tu/1000
1054 */ 1110 */
1055 u16 min_dwell_time_passive; 1111 u32 min_dwell_time_passive;
1056 1112
1057 /* 1113 /*
1058 * The maximum time to wait on each channel for passive scans 1114 * The maximum time to wait on each channel for passive scans
1059 * 1115 *
1060 * Range: 0 - 65536 tu 1116 * Range: u32 tu/1000
1061 */ 1117 */
1062 u16 max_dwell_time_passive; 1118 u32 max_dwell_time_passive;
1063 1119
1064 /* 1120 /*
1065 * Number of probe requests to transmit on each active scan channel 1121 * Number of probe requests to transmit on each active scan channel
@@ -1090,6 +1146,51 @@ struct conf_rf_settings {
1090 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5]; 1146 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
1091}; 1147};
1092 1148
1149struct conf_ht_setting {
1150 u16 tx_ba_win_size;
1151 u16 inactivity_timeout;
1152};
1153
1154struct conf_memory_settings {
1155 /* Number of stations supported in IBSS mode */
1156 u8 num_stations;
1157
1158 /* Number of ssid profiles used in IBSS mode */
1159 u8 ssid_profiles;
1160
1161 /* Number of memory buffers allocated to rx pool */
1162 u8 rx_block_num;
1163
1164 /* Minimum number of blocks allocated to tx pool */
1165 u8 tx_min_block_num;
1166
1167 /* Disable/Enable dynamic memory */
1168 u8 dynamic_memory;
1169
1170 /*
1171 * Minimum required free tx memory blocks in order to assure optimum
1172 * performence
1173 *
1174 * Range: 0-120
1175 */
1176 u8 min_req_tx_blocks;
1177
1178 /*
1179 * Minimum required free rx memory blocks in order to assure optimum
1180 * performence
1181 *
1182 * Range: 0-120
1183 */
1184 u8 min_req_rx_blocks;
1185
1186 /*
1187 * Minimum number of mem blocks (free+used) guaranteed for TX
1188 *
1189 * Range: 0-120
1190 */
1191 u8 tx_min;
1192};
1193
1093struct conf_drv_settings { 1194struct conf_drv_settings {
1094 struct conf_sg_settings sg; 1195 struct conf_sg_settings sg;
1095 struct conf_rx_settings rx; 1196 struct conf_rx_settings rx;
@@ -1100,6 +1201,8 @@ struct conf_drv_settings {
1100 struct conf_roam_trigger_settings roam_trigger; 1201 struct conf_roam_trigger_settings roam_trigger;
1101 struct conf_scan_settings scan; 1202 struct conf_scan_settings scan;
1102 struct conf_rf_settings rf; 1203 struct conf_rf_settings rf;
1204 struct conf_ht_setting ht;
1205 struct conf_memory_settings mem;
1103}; 1206};
1104 1207
1105#endif 1208#endif
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index ec6077760157..8e75b09723b9 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -99,7 +99,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
99 99
100 mutex_lock(&wl->mutex); 100 mutex_lock(&wl->mutex);
101 101
102 ret = wl1271_ps_elp_wakeup(wl, false); 102 ret = wl1271_ps_elp_wakeup(wl);
103 if (ret < 0) 103 if (ret < 0)
104 goto out; 104 goto out;
105 105
@@ -261,27 +261,25 @@ static ssize_t gpio_power_write(struct file *file,
261 unsigned long value; 261 unsigned long value;
262 int ret; 262 int ret;
263 263
264 mutex_lock(&wl->mutex);
265
266 len = min(count, sizeof(buf) - 1); 264 len = min(count, sizeof(buf) - 1);
267 if (copy_from_user(buf, user_buf, len)) { 265 if (copy_from_user(buf, user_buf, len)) {
268 ret = -EFAULT; 266 return -EFAULT;
269 goto out;
270 } 267 }
271 buf[len] = '\0'; 268 buf[len] = '\0';
272 269
273 ret = strict_strtoul(buf, 0, &value); 270 ret = strict_strtoul(buf, 0, &value);
274 if (ret < 0) { 271 if (ret < 0) {
275 wl1271_warning("illegal value in gpio_power"); 272 wl1271_warning("illegal value in gpio_power");
276 goto out; 273 return -EINVAL;
277 } 274 }
278 275
276 mutex_lock(&wl->mutex);
277
279 if (value) 278 if (value)
280 wl1271_power_on(wl); 279 wl1271_power_on(wl);
281 else 280 else
282 wl1271_power_off(wl); 281 wl1271_power_off(wl);
283 282
284out:
285 mutex_unlock(&wl->mutex); 283 mutex_unlock(&wl->mutex);
286 return count; 284 return count;
287} 285}
@@ -293,12 +291,13 @@ static const struct file_operations gpio_power_ops = {
293 .llseek = default_llseek, 291 .llseek = default_llseek,
294}; 292};
295 293
296static int wl1271_debugfs_add_files(struct wl1271 *wl) 294static int wl1271_debugfs_add_files(struct wl1271 *wl,
295 struct dentry *rootdir)
297{ 296{
298 int ret = 0; 297 int ret = 0;
299 struct dentry *entry, *stats; 298 struct dentry *entry, *stats;
300 299
301 stats = debugfs_create_dir("fw-statistics", wl->rootdir); 300 stats = debugfs_create_dir("fw-statistics", rootdir);
302 if (!stats || IS_ERR(stats)) { 301 if (!stats || IS_ERR(stats)) {
303 entry = stats; 302 entry = stats;
304 goto err; 303 goto err;
@@ -395,16 +394,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
395 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); 394 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
396 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); 395 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
397 396
398 DEBUGFS_ADD(tx_queue_len, wl->rootdir); 397 DEBUGFS_ADD(tx_queue_len, rootdir);
399 DEBUGFS_ADD(retry_count, wl->rootdir); 398 DEBUGFS_ADD(retry_count, rootdir);
400 DEBUGFS_ADD(excessive_retries, wl->rootdir); 399 DEBUGFS_ADD(excessive_retries, rootdir);
401
402 DEBUGFS_ADD(gpio_power, wl->rootdir);
403 400
404 entry = debugfs_create_x32("debug_level", 0600, wl->rootdir, 401 DEBUGFS_ADD(gpio_power, rootdir);
405 &wl12xx_debug_level);
406 if (!entry || IS_ERR(entry))
407 goto err;
408 402
409 return 0; 403 return 0;
410 404
@@ -419,7 +413,7 @@ err:
419 413
420void wl1271_debugfs_reset(struct wl1271 *wl) 414void wl1271_debugfs_reset(struct wl1271 *wl)
421{ 415{
422 if (!wl->rootdir) 416 if (!wl->stats.fw_stats)
423 return; 417 return;
424 418
425 memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); 419 memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
@@ -430,13 +424,13 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
430int wl1271_debugfs_init(struct wl1271 *wl) 424int wl1271_debugfs_init(struct wl1271 *wl)
431{ 425{
432 int ret; 426 int ret;
427 struct dentry *rootdir;
433 428
434 wl->rootdir = debugfs_create_dir(KBUILD_MODNAME, 429 rootdir = debugfs_create_dir(KBUILD_MODNAME,
435 wl->hw->wiphy->debugfsdir); 430 wl->hw->wiphy->debugfsdir);
436 431
437 if (IS_ERR(wl->rootdir)) { 432 if (IS_ERR(rootdir)) {
438 ret = PTR_ERR(wl->rootdir); 433 ret = PTR_ERR(rootdir);
439 wl->rootdir = NULL;
440 goto err; 434 goto err;
441 } 435 }
442 436
@@ -450,7 +444,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
450 444
451 wl->stats.fw_stats_update = jiffies; 445 wl->stats.fw_stats_update = jiffies;
452 446
453 ret = wl1271_debugfs_add_files(wl); 447 ret = wl1271_debugfs_add_files(wl, rootdir);
454 448
455 if (ret < 0) 449 if (ret < 0)
456 goto err_file; 450 goto err_file;
@@ -462,8 +456,7 @@ err_file:
462 wl->stats.fw_stats = NULL; 456 wl->stats.fw_stats = NULL;
463 457
464err_fw: 458err_fw:
465 debugfs_remove_recursive(wl->rootdir); 459 debugfs_remove_recursive(rootdir);
466 wl->rootdir = NULL;
467 460
468err: 461err:
469 return ret; 462 return ret;
@@ -473,8 +466,4 @@ void wl1271_debugfs_exit(struct wl1271 *wl)
473{ 466{
474 kfree(wl->stats.fw_stats); 467 kfree(wl->stats.fw_stats);
475 wl->stats.fw_stats = NULL; 468 wl->stats.fw_stats = NULL;
476
477 debugfs_remove_recursive(wl->rootdir);
478 wl->rootdir = NULL;
479
480} 469}
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index f9146f5242fb..1b170c5cc595 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -135,20 +135,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
135 /* go to extremely low power mode */ 135 /* go to extremely low power mode */
136 wl1271_ps_elp_sleep(wl); 136 wl1271_ps_elp_sleep(wl);
137 break; 137 break;
138 case EVENT_EXIT_POWER_SAVE_FAIL:
139 wl1271_debug(DEBUG_PSM, "PSM exit failed");
140
141 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
142 wl->psm_entry_retry = 0;
143 break;
144 }
145
146 /* make sure the firmware goes to active mode - the frame to
147 be sent next will indicate to the AP, that we are active. */
148 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
149 wl->basic_rate, false);
150 break;
151 case EVENT_EXIT_POWER_SAVE_SUCCESS:
152 default: 138 default:
153 break; 139 break;
154 } 140 }
@@ -186,6 +172,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
186 int ret; 172 int ret;
187 u32 vector; 173 u32 vector;
188 bool beacon_loss = false; 174 bool beacon_loss = false;
175 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
189 176
190 wl1271_event_mbox_dump(mbox); 177 wl1271_event_mbox_dump(mbox);
191 178
@@ -218,21 +205,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
218 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack. 205 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
219 * 206 *
220 */ 207 */
221 if (vector & BSS_LOSE_EVENT_ID) { 208 if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
222 wl1271_info("Beacon loss detected."); 209 wl1271_info("Beacon loss detected.");
223 210
224 /* indicate to the stack, that beacons have been lost */ 211 /* indicate to the stack, that beacons have been lost */
225 beacon_loss = true; 212 beacon_loss = true;
226 } 213 }
227 214
228 if (vector & PS_REPORT_EVENT_ID) { 215 if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
229 wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT"); 216 wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
230 ret = wl1271_event_ps_report(wl, mbox, &beacon_loss); 217 ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
231 if (ret < 0) 218 if (ret < 0)
232 return ret; 219 return ret;
233 } 220 }
234 221
235 if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) 222 if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
236 wl1271_event_pspoll_delivery_fail(wl); 223 wl1271_event_pspoll_delivery_fail(wl);
237 224
238 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { 225 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 6cce0143adb5..0e80886f3031 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -59,6 +59,7 @@ enum {
59 BSS_LOSE_EVENT_ID = BIT(18), 59 BSS_LOSE_EVENT_ID = BIT(18),
60 REGAINED_BSS_EVENT_ID = BIT(19), 60 REGAINED_BSS_EVENT_ID = BIT(19),
61 ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20), 61 ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
62 STA_REMOVE_COMPLETE_EVENT_ID = BIT(21), /* AP */
62 SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), 63 SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
63 SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23), 64 SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
64 SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), 65 SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
@@ -74,8 +75,6 @@ enum {
74enum { 75enum {
75 EVENT_ENTER_POWER_SAVE_FAIL = 0, 76 EVENT_ENTER_POWER_SAVE_FAIL = 0,
76 EVENT_ENTER_POWER_SAVE_SUCCESS, 77 EVENT_ENTER_POWER_SAVE_SUCCESS,
77 EVENT_EXIT_POWER_SAVE_FAIL,
78 EVENT_EXIT_POWER_SAVE_SUCCESS,
79}; 78};
80 79
81struct event_debug_report { 80struct event_debug_report {
@@ -115,7 +114,12 @@ struct event_mailbox {
115 u8 scheduled_scan_status; 114 u8 scheduled_scan_status;
116 u8 ps_status; 115 u8 ps_status;
117 116
118 u8 reserved_5[29]; 117 /* AP FW only */
118 u8 hlid_removed;
119 __le16 sta_aging_status;
120 __le16 sta_tx_retry_exceeded;
121
122 u8 reserved_5[24];
119} __packed; 123} __packed;
120 124
121int wl1271_event_unmask(struct wl1271 *wl); 125int wl1271_event_unmask(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 785a5304bfc4..6072fe457135 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -30,27 +30,9 @@
30#include "acx.h" 30#include "acx.h"
31#include "cmd.h" 31#include "cmd.h"
32#include "reg.h" 32#include "reg.h"
33#include "tx.h"
33 34
34static int wl1271_init_hwenc_config(struct wl1271 *wl) 35int wl1271_sta_init_templates_config(struct wl1271 *wl)
35{
36 int ret;
37
38 ret = wl1271_acx_feature_cfg(wl);
39 if (ret < 0) {
40 wl1271_warning("couldn't set feature config");
41 return ret;
42 }
43
44 ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key);
45 if (ret < 0) {
46 wl1271_warning("couldn't set default key");
47 return ret;
48 }
49
50 return 0;
51}
52
53int wl1271_init_templates_config(struct wl1271 *wl)
54{ 36{
55 int ret, i; 37 int ret, i;
56 38
@@ -118,6 +100,132 @@ int wl1271_init_templates_config(struct wl1271 *wl)
118 return 0; 100 return 0;
119} 101}
120 102
103static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
104{
105 struct wl12xx_disconn_template *tmpl;
106 int ret;
107
108 tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
109 if (!tmpl) {
110 ret = -ENOMEM;
111 goto out;
112 }
113
114 tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
115 IEEE80211_STYPE_DEAUTH);
116
117 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
118 tmpl, sizeof(*tmpl), 0,
119 wl1271_tx_min_rate_get(wl));
120
121out:
122 kfree(tmpl);
123 return ret;
124}
125
126static int wl1271_ap_init_null_template(struct wl1271 *wl)
127{
128 struct ieee80211_hdr_3addr *nullfunc;
129 int ret;
130
131 nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
132 if (!nullfunc) {
133 ret = -ENOMEM;
134 goto out;
135 }
136
137 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
138 IEEE80211_STYPE_NULLFUNC |
139 IEEE80211_FCTL_FROMDS);
140
141 /* nullfunc->addr1 is filled by FW */
142
143 memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
144 memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
145
146 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
147 sizeof(*nullfunc), 0,
148 wl1271_tx_min_rate_get(wl));
149
150out:
151 kfree(nullfunc);
152 return ret;
153}
154
155static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
156{
157 struct ieee80211_qos_hdr *qosnull;
158 int ret;
159
160 qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
161 if (!qosnull) {
162 ret = -ENOMEM;
163 goto out;
164 }
165
166 qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
167 IEEE80211_STYPE_QOS_NULLFUNC |
168 IEEE80211_FCTL_FROMDS);
169
170 /* qosnull->addr1 is filled by FW */
171
172 memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
173 memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
174
175 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
176 sizeof(*qosnull), 0,
177 wl1271_tx_min_rate_get(wl));
178
179out:
180 kfree(qosnull);
181 return ret;
182}
183
184static int wl1271_ap_init_templates_config(struct wl1271 *wl)
185{
186 int ret;
187
188 /*
189 * Put very large empty placeholders for all templates. These
190 * reserve memory for later.
191 */
192 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
193 sizeof
194 (struct wl12xx_probe_resp_template),
195 0, WL1271_RATE_AUTOMATIC);
196 if (ret < 0)
197 return ret;
198
199 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
200 sizeof
201 (struct wl12xx_beacon_template),
202 0, WL1271_RATE_AUTOMATIC);
203 if (ret < 0)
204 return ret;
205
206 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
207 sizeof
208 (struct wl12xx_disconn_template),
209 0, WL1271_RATE_AUTOMATIC);
210 if (ret < 0)
211 return ret;
212
213 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
214 sizeof(struct wl12xx_null_data_template),
215 0, WL1271_RATE_AUTOMATIC);
216 if (ret < 0)
217 return ret;
218
219 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
220 sizeof
221 (struct wl12xx_qos_null_data_template),
222 0, WL1271_RATE_AUTOMATIC);
223 if (ret < 0)
224 return ret;
225
226 return 0;
227}
228
121static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter) 229static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
122{ 230{
123 int ret; 231 int ret;
@@ -145,10 +253,6 @@ int wl1271_init_phy_config(struct wl1271 *wl)
145 if (ret < 0) 253 if (ret < 0)
146 return ret; 254 return ret;
147 255
148 ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
149 if (ret < 0)
150 return ret;
151
152 ret = wl1271_acx_service_period_timeout(wl); 256 ret = wl1271_acx_service_period_timeout(wl);
153 if (ret < 0) 257 if (ret < 0)
154 return ret; 258 return ret;
@@ -213,11 +317,199 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
213 return 0; 317 return 0;
214} 318}
215 319
320static int wl1271_sta_hw_init(struct wl1271 *wl)
321{
322 int ret;
323
324 ret = wl1271_cmd_ext_radio_parms(wl);
325 if (ret < 0)
326 return ret;
327
328 /* PS config */
329 ret = wl1271_acx_config_ps(wl);
330 if (ret < 0)
331 return ret;
332
333 ret = wl1271_sta_init_templates_config(wl);
334 if (ret < 0)
335 return ret;
336
337 ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
338 if (ret < 0)
339 return ret;
340
341 /* Initialize connection monitoring thresholds */
342 ret = wl1271_acx_conn_monit_params(wl, false);
343 if (ret < 0)
344 return ret;
345
346 /* Beacon filtering */
347 ret = wl1271_init_beacon_filter(wl);
348 if (ret < 0)
349 return ret;
350
351 /* Bluetooth WLAN coexistence */
352 ret = wl1271_init_pta(wl);
353 if (ret < 0)
354 return ret;
355
356 /* Beacons and broadcast settings */
357 ret = wl1271_init_beacon_broadcast(wl);
358 if (ret < 0)
359 return ret;
360
361 /* Configure for ELP power saving */
362 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
363 if (ret < 0)
364 return ret;
365
366 /* Configure rssi/snr averaging weights */
367 ret = wl1271_acx_rssi_snr_avg_weights(wl);
368 if (ret < 0)
369 return ret;
370
371 ret = wl1271_acx_sta_rate_policies(wl);
372 if (ret < 0)
373 return ret;
374
375 ret = wl1271_acx_sta_mem_cfg(wl);
376 if (ret < 0)
377 return ret;
378
379 return 0;
380}
381
382static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
383{
384 int ret, i;
385
386 ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
387 if (ret < 0) {
388 wl1271_warning("couldn't set default key");
389 return ret;
390 }
391
392 /* disable all keep-alive templates */
393 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
394 ret = wl1271_acx_keep_alive_config(wl, i,
395 ACX_KEEP_ALIVE_TPL_INVALID);
396 if (ret < 0)
397 return ret;
398 }
399
400 /* disable the keep-alive feature */
401 ret = wl1271_acx_keep_alive_mode(wl, false);
402 if (ret < 0)
403 return ret;
404
405 return 0;
406}
407
408static int wl1271_ap_hw_init(struct wl1271 *wl)
409{
410 int ret, i;
411
412 ret = wl1271_ap_init_templates_config(wl);
413 if (ret < 0)
414 return ret;
415
416 /* Configure for power always on */
417 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
418 if (ret < 0)
419 return ret;
420
421 /* Configure initial TX rate classes */
422 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
423 ret = wl1271_acx_ap_rate_policy(wl,
424 &wl->conf.tx.ap_rc_conf[i], i);
425 if (ret < 0)
426 return ret;
427 }
428
429 ret = wl1271_acx_ap_rate_policy(wl,
430 &wl->conf.tx.ap_mgmt_conf,
431 ACX_TX_AP_MODE_MGMT_RATE);
432 if (ret < 0)
433 return ret;
434
435 ret = wl1271_acx_ap_rate_policy(wl,
436 &wl->conf.tx.ap_bcst_conf,
437 ACX_TX_AP_MODE_BCST_RATE);
438 if (ret < 0)
439 return ret;
440
441 ret = wl1271_acx_max_tx_retry(wl);
442 if (ret < 0)
443 return ret;
444
445 ret = wl1271_acx_ap_mem_cfg(wl);
446 if (ret < 0)
447 return ret;
448
449 return 0;
450}
451
452static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
453{
454 int ret;
455
456 ret = wl1271_ap_init_deauth_template(wl);
457 if (ret < 0)
458 return ret;
459
460 ret = wl1271_ap_init_null_template(wl);
461 if (ret < 0)
462 return ret;
463
464 ret = wl1271_ap_init_qos_null_template(wl);
465 if (ret < 0)
466 return ret;
467
468 return 0;
469}
470
471static void wl1271_check_ba_support(struct wl1271 *wl)
472{
473 /* validate FW cose ver x.x.x.50-60.x */
474 if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
475 (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
476 wl->ba_support = true;
477 return;
478 }
479
480 wl->ba_support = false;
481}
482
483static int wl1271_set_ba_policies(struct wl1271 *wl)
484{
485 u8 tid_index;
486 int ret = 0;
487
488 /* Reset the BA RX indicators */
489 wl->ba_rx_bitmap = 0;
490
491 /* validate that FW support BA */
492 wl1271_check_ba_support(wl);
493
494 if (wl->ba_support)
495 /* 802.11n initiator BA session setting */
496 for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
497 ++tid_index) {
498 ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
499 tid_index, true);
500 if (ret < 0)
501 break;
502 }
503
504 return ret;
505}
506
216int wl1271_hw_init(struct wl1271 *wl) 507int wl1271_hw_init(struct wl1271 *wl)
217{ 508{
218 struct conf_tx_ac_category *conf_ac; 509 struct conf_tx_ac_category *conf_ac;
219 struct conf_tx_tid *conf_tid; 510 struct conf_tx_tid *conf_tid;
220 int ret, i; 511 int ret, i;
512 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
221 513
222 ret = wl1271_cmd_general_parms(wl); 514 ret = wl1271_cmd_general_parms(wl);
223 if (ret < 0) 515 if (ret < 0)
@@ -227,12 +519,12 @@ int wl1271_hw_init(struct wl1271 *wl)
227 if (ret < 0) 519 if (ret < 0)
228 return ret; 520 return ret;
229 521
230 ret = wl1271_cmd_ext_radio_parms(wl); 522 /* Mode specific init */
231 if (ret < 0) 523 if (is_ap)
232 return ret; 524 ret = wl1271_ap_hw_init(wl);
525 else
526 ret = wl1271_sta_hw_init(wl);
233 527
234 /* Template settings */
235 ret = wl1271_init_templates_config(wl);
236 if (ret < 0) 528 if (ret < 0)
237 return ret; 529 return ret;
238 530
@@ -259,16 +551,6 @@ int wl1271_hw_init(struct wl1271 *wl)
259 if (ret < 0) 551 if (ret < 0)
260 goto out_free_memmap; 552 goto out_free_memmap;
261 553
262 /* Initialize connection monitoring thresholds */
263 ret = wl1271_acx_conn_monit_params(wl, false);
264 if (ret < 0)
265 goto out_free_memmap;
266
267 /* Beacon filtering */
268 ret = wl1271_init_beacon_filter(wl);
269 if (ret < 0)
270 goto out_free_memmap;
271
272 /* Configure TX patch complete interrupt behavior */ 554 /* Configure TX patch complete interrupt behavior */
273 ret = wl1271_acx_tx_config_options(wl); 555 ret = wl1271_acx_tx_config_options(wl);
274 if (ret < 0) 556 if (ret < 0)
@@ -279,21 +561,11 @@ int wl1271_hw_init(struct wl1271 *wl)
279 if (ret < 0) 561 if (ret < 0)
280 goto out_free_memmap; 562 goto out_free_memmap;
281 563
282 /* Bluetooth WLAN coexistence */
283 ret = wl1271_init_pta(wl);
284 if (ret < 0)
285 goto out_free_memmap;
286
287 /* Energy detection */ 564 /* Energy detection */
288 ret = wl1271_init_energy_detection(wl); 565 ret = wl1271_init_energy_detection(wl);
289 if (ret < 0) 566 if (ret < 0)
290 goto out_free_memmap; 567 goto out_free_memmap;
291 568
292 /* Beacons and broadcast settings */
293 ret = wl1271_init_beacon_broadcast(wl);
294 if (ret < 0)
295 goto out_free_memmap;
296
297 /* Default fragmentation threshold */ 569 /* Default fragmentation threshold */
298 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); 570 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
299 if (ret < 0) 571 if (ret < 0)
@@ -321,23 +593,13 @@ int wl1271_hw_init(struct wl1271 *wl)
321 goto out_free_memmap; 593 goto out_free_memmap;
322 } 594 }
323 595
324 /* Configure TX rate classes */
325 ret = wl1271_acx_rate_policies(wl);
326 if (ret < 0)
327 goto out_free_memmap;
328
329 /* Enable data path */ 596 /* Enable data path */
330 ret = wl1271_cmd_data_path(wl, 1); 597 ret = wl1271_cmd_data_path(wl, 1);
331 if (ret < 0) 598 if (ret < 0)
332 goto out_free_memmap; 599 goto out_free_memmap;
333 600
334 /* Configure for ELP power saving */
335 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
336 if (ret < 0)
337 goto out_free_memmap;
338
339 /* Configure HW encryption */ 601 /* Configure HW encryption */
340 ret = wl1271_init_hwenc_config(wl); 602 ret = wl1271_acx_feature_cfg(wl);
341 if (ret < 0) 603 if (ret < 0)
342 goto out_free_memmap; 604 goto out_free_memmap;
343 605
@@ -346,21 +608,17 @@ int wl1271_hw_init(struct wl1271 *wl)
346 if (ret < 0) 608 if (ret < 0)
347 goto out_free_memmap; 609 goto out_free_memmap;
348 610
349 /* disable all keep-alive templates */ 611 /* Mode specific init - post mem init */
350 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 612 if (is_ap)
351 ret = wl1271_acx_keep_alive_config(wl, i, 613 ret = wl1271_ap_hw_init_post_mem(wl);
352 ACX_KEEP_ALIVE_TPL_INVALID); 614 else
353 if (ret < 0) 615 ret = wl1271_sta_hw_init_post_mem(wl);
354 goto out_free_memmap;
355 }
356 616
357 /* disable the keep-alive feature */
358 ret = wl1271_acx_keep_alive_mode(wl, false);
359 if (ret < 0) 617 if (ret < 0)
360 goto out_free_memmap; 618 goto out_free_memmap;
361 619
362 /* Configure rssi/snr averaging weights */ 620 /* Configure initiator BA sessions policies */
363 ret = wl1271_acx_rssi_snr_avg_weights(wl); 621 ret = wl1271_set_ba_policies(wl);
364 if (ret < 0) 622 if (ret < 0)
365 goto out_free_memmap; 623 goto out_free_memmap;
366 624
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 7762421f8602..3a8bd3f426d2 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,7 +27,7 @@
27#include "wl12xx.h" 27#include "wl12xx.h"
28 28
29int wl1271_hw_init_power_auth(struct wl1271 *wl); 29int wl1271_hw_init_power_auth(struct wl1271 *wl);
30int wl1271_init_templates_config(struct wl1271 *wl); 30int wl1271_sta_init_templates_config(struct wl1271 *wl);
31int wl1271_init_phy_config(struct wl1271 *wl); 31int wl1271_init_phy_config(struct wl1271 *wl);
32int wl1271_init_pta(struct wl1271 *wl); 32int wl1271_init_pta(struct wl1271 *wl);
33int wl1271_init_energy_detection(struct wl1271 *wl); 33int wl1271_init_energy_detection(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index 844b32b170bb..c1aac8292089 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -168,5 +168,6 @@ void wl1271_unregister_hw(struct wl1271 *wl);
168int wl1271_init_ieee80211(struct wl1271 *wl); 168int wl1271_init_ieee80211(struct wl1271 *wl);
169struct ieee80211_hw *wl1271_alloc_hw(void); 169struct ieee80211_hw *wl1271_alloc_hw(void);
170int wl1271_free_hw(struct wl1271 *wl); 170int wl1271_free_hw(struct wl1271 *wl);
171irqreturn_t wl1271_irq(int irq, void *data);
171 172
172#endif 173#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 062247ef3ad2..8b3c8d196b03 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -116,11 +116,11 @@ static struct conf_drv_settings default_conf = {
116 }, 116 },
117 .tx = { 117 .tx = {
118 .tx_energy_detection = 0, 118 .tx_energy_detection = 0,
119 .rc_conf = { 119 .sta_rc_conf = {
120 .enabled_rates = 0, 120 .enabled_rates = 0,
121 .short_retry_limit = 10, 121 .short_retry_limit = 10,
122 .long_retry_limit = 10, 122 .long_retry_limit = 10,
123 .aflags = 0 123 .aflags = 0,
124 }, 124 },
125 .ac_conf_count = 4, 125 .ac_conf_count = 4,
126 .ac_conf = { 126 .ac_conf = {
@@ -153,6 +153,45 @@ static struct conf_drv_settings default_conf = {
153 .tx_op_limit = 1504, 153 .tx_op_limit = 1504,
154 }, 154 },
155 }, 155 },
156 .ap_rc_conf = {
157 [0] = {
158 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
159 .short_retry_limit = 10,
160 .long_retry_limit = 10,
161 .aflags = 0,
162 },
163 [1] = {
164 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
165 .short_retry_limit = 10,
166 .long_retry_limit = 10,
167 .aflags = 0,
168 },
169 [2] = {
170 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
171 .short_retry_limit = 10,
172 .long_retry_limit = 10,
173 .aflags = 0,
174 },
175 [3] = {
176 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
177 .short_retry_limit = 10,
178 .long_retry_limit = 10,
179 .aflags = 0,
180 },
181 },
182 .ap_mgmt_conf = {
183 .enabled_rates = CONF_TX_AP_DEFAULT_MGMT_RATES,
184 .short_retry_limit = 10,
185 .long_retry_limit = 10,
186 .aflags = 0,
187 },
188 .ap_bcst_conf = {
189 .enabled_rates = CONF_HW_BIT_RATE_1MBPS,
190 .short_retry_limit = 10,
191 .long_retry_limit = 10,
192 .aflags = 0,
193 },
194 .ap_max_tx_retries = 100,
156 .tid_conf_count = 4, 195 .tid_conf_count = 4,
157 .tid_conf = { 196 .tid_conf = {
158 [CONF_TX_AC_BE] = { 197 [CONF_TX_AC_BE] = {
@@ -193,6 +232,8 @@ static struct conf_drv_settings default_conf = {
193 .tx_compl_threshold = 4, 232 .tx_compl_threshold = 4,
194 .basic_rate = CONF_HW_BIT_RATE_1MBPS, 233 .basic_rate = CONF_HW_BIT_RATE_1MBPS,
195 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS, 234 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
235 .tmpl_short_retry_limit = 10,
236 .tmpl_long_retry_limit = 10,
196 }, 237 },
197 .conn = { 238 .conn = {
198 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, 239 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -215,6 +256,7 @@ static struct conf_drv_settings default_conf = {
215 .bet_enable = CONF_BET_MODE_ENABLE, 256 .bet_enable = CONF_BET_MODE_ENABLE,
216 .bet_max_consecutive = 10, 257 .bet_max_consecutive = 10,
217 .psm_entry_retries = 5, 258 .psm_entry_retries = 5,
259 .psm_exit_retries = 255,
218 .psm_entry_nullfunc_retries = 3, 260 .psm_entry_nullfunc_retries = 3,
219 .psm_entry_hangover_period = 1, 261 .psm_entry_hangover_period = 1,
220 .keep_alive_interval = 55000, 262 .keep_alive_interval = 55000,
@@ -233,13 +275,13 @@ static struct conf_drv_settings default_conf = {
233 .avg_weight_rssi_beacon = 20, 275 .avg_weight_rssi_beacon = 20,
234 .avg_weight_rssi_data = 10, 276 .avg_weight_rssi_data = 10,
235 .avg_weight_snr_beacon = 20, 277 .avg_weight_snr_beacon = 20,
236 .avg_weight_snr_data = 10 278 .avg_weight_snr_data = 10,
237 }, 279 },
238 .scan = { 280 .scan = {
239 .min_dwell_time_active = 7500, 281 .min_dwell_time_active = 7500,
240 .max_dwell_time_active = 30000, 282 .max_dwell_time_active = 30000,
241 .min_dwell_time_passive = 30000, 283 .min_dwell_time_passive = 100000,
242 .max_dwell_time_passive = 60000, 284 .max_dwell_time_passive = 100000,
243 .num_probe_reqs = 2, 285 .num_probe_reqs = 2,
244 }, 286 },
245 .rf = { 287 .rf = {
@@ -252,9 +294,24 @@ static struct conf_drv_settings default_conf = {
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
253 }, 295 },
254 }, 296 },
297 .ht = {
298 .tx_ba_win_size = 64,
299 .inactivity_timeout = 10000,
300 },
301 .mem = {
302 .num_stations = 1,
303 .ssid_profiles = 1,
304 .rx_block_num = 70,
305 .tx_min_block_num = 40,
306 .dynamic_memory = 0,
307 .min_req_tx_blocks = 100,
308 .min_req_rx_blocks = 22,
309 .tx_min = 27,
310 }
255}; 311};
256 312
257static void __wl1271_op_remove_interface(struct wl1271 *wl); 313static void __wl1271_op_remove_interface(struct wl1271 *wl);
314static void wl1271_free_ap_keys(struct wl1271 *wl);
258 315
259 316
260static void wl1271_device_release(struct device *dev) 317static void wl1271_device_release(struct device *dev)
@@ -317,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
317 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 374 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
318 goto out; 375 goto out;
319 376
320 ret = wl1271_ps_elp_wakeup(wl, false); 377 ret = wl1271_ps_elp_wakeup(wl);
321 if (ret < 0) 378 if (ret < 0)
322 goto out; 379 goto out;
323 380
@@ -393,7 +450,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
393 if (ret < 0) 450 if (ret < 0)
394 return ret; 451 return ret;
395 452
396 ret = wl1271_init_templates_config(wl); 453 ret = wl1271_sta_init_templates_config(wl);
397 if (ret < 0) 454 if (ret < 0)
398 return ret; 455 return ret;
399 456
@@ -425,6 +482,10 @@ static int wl1271_plt_init(struct wl1271 *wl)
425 if (ret < 0) 482 if (ret < 0)
426 goto out_free_memmap; 483 goto out_free_memmap;
427 484
485 ret = wl1271_acx_sta_mem_cfg(wl);
486 if (ret < 0)
487 goto out_free_memmap;
488
428 /* Default fragmentation threshold */ 489 /* Default fragmentation threshold */
429 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); 490 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
430 if (ret < 0) 491 if (ret < 0)
@@ -476,14 +537,71 @@ static int wl1271_plt_init(struct wl1271 *wl)
476 return ret; 537 return ret;
477} 538}
478 539
540static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
541{
542 bool fw_ps;
543
544 /* only regulate station links */
545 if (hlid < WL1271_AP_STA_HLID_START)
546 return;
547
548 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
549
550 /*
551 * Wake up from high level PS if the STA is asleep with too little
552 * blocks in FW or if the STA is awake.
553 */
554 if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
555 wl1271_ps_link_end(wl, hlid);
556
557 /* Start high-level PS if the STA is asleep with enough blocks in FW */
558 else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
559 wl1271_ps_link_start(wl, hlid, true);
560}
561
562static void wl1271_irq_update_links_status(struct wl1271 *wl,
563 struct wl1271_fw_ap_status *status)
564{
565 u32 cur_fw_ps_map;
566 u8 hlid;
567
568 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
569 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
570 wl1271_debug(DEBUG_PSM,
571 "link ps prev 0x%x cur 0x%x changed 0x%x",
572 wl->ap_fw_ps_map, cur_fw_ps_map,
573 wl->ap_fw_ps_map ^ cur_fw_ps_map);
574
575 wl->ap_fw_ps_map = cur_fw_ps_map;
576 }
577
578 for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
579 u8 cnt = status->tx_lnk_free_blks[hlid] -
580 wl->links[hlid].prev_freed_blks;
581
582 wl->links[hlid].prev_freed_blks =
583 status->tx_lnk_free_blks[hlid];
584 wl->links[hlid].allocated_blks -= cnt;
585
586 wl1271_irq_ps_regulate_link(wl, hlid,
587 wl->links[hlid].allocated_blks);
588 }
589}
590
479static void wl1271_fw_status(struct wl1271 *wl, 591static void wl1271_fw_status(struct wl1271 *wl,
480 struct wl1271_fw_status *status) 592 struct wl1271_fw_full_status *full_status)
481{ 593{
594 struct wl1271_fw_common_status *status = &full_status->common;
482 struct timespec ts; 595 struct timespec ts;
483 u32 total = 0; 596 u32 total = 0;
484 int i; 597 int i;
485 598
486 wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); 599 if (wl->bss_type == BSS_TYPE_AP_BSS)
600 wl1271_raw_read(wl, FW_STATUS_ADDR, status,
601 sizeof(struct wl1271_fw_ap_status), false);
602 else
603 wl1271_raw_read(wl, FW_STATUS_ADDR, status,
604 sizeof(struct wl1271_fw_sta_status), false);
487 605
488 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 606 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
489 "drv_rx_counter = %d, tx_results_counter = %d)", 607 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -507,22 +625,54 @@ static void wl1271_fw_status(struct wl1271 *wl,
507 if (total) 625 if (total)
508 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 626 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
509 627
628 /* for AP update num of allocated TX blocks per link and ps status */
629 if (wl->bss_type == BSS_TYPE_AP_BSS)
630 wl1271_irq_update_links_status(wl, &full_status->ap);
631
510 /* update the host-chipset time offset */ 632 /* update the host-chipset time offset */
511 getnstimeofday(&ts); 633 getnstimeofday(&ts);
512 wl->time_offset = (timespec_to_ns(&ts) >> 10) - 634 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
513 (s64)le32_to_cpu(status->fw_localtime); 635 (s64)le32_to_cpu(status->fw_localtime);
514} 636}
515 637
516#define WL1271_IRQ_MAX_LOOPS 10 638static void wl1271_flush_deferred_work(struct wl1271 *wl)
639{
640 struct sk_buff *skb;
641
642 /* Pass all received frames to the network stack */
643 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
644 ieee80211_rx_ni(wl->hw, skb);
645
646 /* Return sent skbs to the network stack */
647 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
648 ieee80211_tx_status(wl->hw, skb);
649}
650
651static void wl1271_netstack_work(struct work_struct *work)
652{
653 struct wl1271 *wl =
654 container_of(work, struct wl1271, netstack_work);
655
656 do {
657 wl1271_flush_deferred_work(wl);
658 } while (skb_queue_len(&wl->deferred_rx_queue));
659}
660
661#define WL1271_IRQ_MAX_LOOPS 256
517 662
518static void wl1271_irq_work(struct work_struct *work) 663irqreturn_t wl1271_irq(int irq, void *cookie)
519{ 664{
520 int ret; 665 int ret;
521 u32 intr; 666 u32 intr;
522 int loopcount = WL1271_IRQ_MAX_LOOPS; 667 int loopcount = WL1271_IRQ_MAX_LOOPS;
668 struct wl1271 *wl = (struct wl1271 *)cookie;
669 bool done = false;
670 unsigned int defer_count;
523 unsigned long flags; 671 unsigned long flags;
524 struct wl1271 *wl = 672
525 container_of(work, struct wl1271, irq_work); 673 /* TX might be handled here, avoid redundant work */
674 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
675 cancel_work_sync(&wl->tx_work);
526 676
527 mutex_lock(&wl->mutex); 677 mutex_lock(&wl->mutex);
528 678
@@ -531,26 +681,27 @@ static void wl1271_irq_work(struct work_struct *work)
531 if (unlikely(wl->state == WL1271_STATE_OFF)) 681 if (unlikely(wl->state == WL1271_STATE_OFF))
532 goto out; 682 goto out;
533 683
534 ret = wl1271_ps_elp_wakeup(wl, true); 684 ret = wl1271_ps_elp_wakeup(wl);
535 if (ret < 0) 685 if (ret < 0)
536 goto out; 686 goto out;
537 687
538 spin_lock_irqsave(&wl->wl_lock, flags); 688 while (!done && loopcount--) {
539 while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) { 689 /*
540 clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags); 690 * In order to avoid a race with the hardirq, clear the flag
541 spin_unlock_irqrestore(&wl->wl_lock, flags); 691 * before acknowledging the chip. Since the mutex is held,
542 loopcount--; 692 * wl1271_ps_elp_wakeup cannot be called concurrently.
693 */
694 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
695 smp_mb__after_clear_bit();
543 696
544 wl1271_fw_status(wl, wl->fw_status); 697 wl1271_fw_status(wl, wl->fw_status);
545 intr = le32_to_cpu(wl->fw_status->intr); 698 intr = le32_to_cpu(wl->fw_status->common.intr);
699 intr &= WL1271_INTR_MASK;
546 if (!intr) { 700 if (!intr) {
547 wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); 701 done = true;
548 spin_lock_irqsave(&wl->wl_lock, flags);
549 continue; 702 continue;
550 } 703 }
551 704
552 intr &= WL1271_INTR_MASK;
553
554 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { 705 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
555 wl1271_error("watchdog interrupt received! " 706 wl1271_error("watchdog interrupt received! "
556 "starting recovery."); 707 "starting recovery.");
@@ -560,25 +711,35 @@ static void wl1271_irq_work(struct work_struct *work)
560 goto out; 711 goto out;
561 } 712 }
562 713
563 if (intr & WL1271_ACX_INTR_DATA) { 714 if (likely(intr & WL1271_ACX_INTR_DATA)) {
564 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 715 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
565 716
566 /* check for tx results */ 717 wl1271_rx(wl, &wl->fw_status->common);
567 if (wl->fw_status->tx_results_counter !=
568 (wl->tx_results_count & 0xff))
569 wl1271_tx_complete(wl);
570 718
571 /* Check if any tx blocks were freed */ 719 /* Check if any tx blocks were freed */
720 spin_lock_irqsave(&wl->wl_lock, flags);
572 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 721 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
573 wl->tx_queue_count) { 722 wl->tx_queue_count) {
723 spin_unlock_irqrestore(&wl->wl_lock, flags);
574 /* 724 /*
575 * In order to avoid starvation of the TX path, 725 * In order to avoid starvation of the TX path,
576 * call the work function directly. 726 * call the work function directly.
577 */ 727 */
578 wl1271_tx_work_locked(wl); 728 wl1271_tx_work_locked(wl);
729 } else {
730 spin_unlock_irqrestore(&wl->wl_lock, flags);
579 } 731 }
580 732
581 wl1271_rx(wl, wl->fw_status); 733 /* check for tx results */
734 if (wl->fw_status->common.tx_results_counter !=
735 (wl->tx_results_count & 0xff))
736 wl1271_tx_complete(wl);
737
738 /* Make sure the deferred queues don't get too long */
739 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
740 skb_queue_len(&wl->deferred_rx_queue);
741 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
742 wl1271_flush_deferred_work(wl);
582 } 743 }
583 744
584 if (intr & WL1271_ACX_INTR_EVENT_A) { 745 if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -597,28 +758,48 @@ static void wl1271_irq_work(struct work_struct *work)
597 758
598 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 759 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
599 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 760 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
600
601 spin_lock_irqsave(&wl->wl_lock, flags);
602 } 761 }
603 762
604 if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
605 ieee80211_queue_work(wl->hw, &wl->irq_work);
606 else
607 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
608 spin_unlock_irqrestore(&wl->wl_lock, flags);
609
610 wl1271_ps_elp_sleep(wl); 763 wl1271_ps_elp_sleep(wl);
611 764
612out: 765out:
766 spin_lock_irqsave(&wl->wl_lock, flags);
767 /* In case TX was not handled here, queue TX work */
768 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
769 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
770 wl->tx_queue_count)
771 ieee80211_queue_work(wl->hw, &wl->tx_work);
772 spin_unlock_irqrestore(&wl->wl_lock, flags);
773
613 mutex_unlock(&wl->mutex); 774 mutex_unlock(&wl->mutex);
775
776 return IRQ_HANDLED;
614} 777}
778EXPORT_SYMBOL_GPL(wl1271_irq);
615 779
616static int wl1271_fetch_firmware(struct wl1271 *wl) 780static int wl1271_fetch_firmware(struct wl1271 *wl)
617{ 781{
618 const struct firmware *fw; 782 const struct firmware *fw;
783 const char *fw_name;
619 int ret; 784 int ret;
620 785
621 ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl)); 786 switch (wl->bss_type) {
787 case BSS_TYPE_AP_BSS:
788 fw_name = WL1271_AP_FW_NAME;
789 break;
790 case BSS_TYPE_IBSS:
791 case BSS_TYPE_STA_BSS:
792 fw_name = WL1271_FW_NAME;
793 break;
794 default:
795 wl1271_error("no compatible firmware for bss_type %d",
796 wl->bss_type);
797 return -EINVAL;
798 }
799
800 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
801
802 ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
622 803
623 if (ret < 0) { 804 if (ret < 0) {
624 wl1271_error("could not get firmware: %d", ret); 805 wl1271_error("could not get firmware: %d", ret);
@@ -632,6 +813,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
632 goto out; 813 goto out;
633 } 814 }
634 815
816 vfree(wl->fw);
635 wl->fw_len = fw->size; 817 wl->fw_len = fw->size;
636 wl->fw = vmalloc(wl->fw_len); 818 wl->fw = vmalloc(wl->fw_len);
637 819
@@ -642,7 +824,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
642 } 824 }
643 825
644 memcpy(wl->fw, fw->data, wl->fw_len); 826 memcpy(wl->fw, fw->data, wl->fw_len);
645 827 wl->fw_bss_type = wl->bss_type;
646 ret = 0; 828 ret = 0;
647 829
648out: 830out:
@@ -778,7 +960,8 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
778 goto out; 960 goto out;
779 } 961 }
780 962
781 if (wl->fw == NULL) { 963 /* Make sure the firmware type matches the BSS type */
964 if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
782 ret = wl1271_fetch_firmware(wl); 965 ret = wl1271_fetch_firmware(wl);
783 if (ret < 0) 966 if (ret < 0)
784 goto out; 967 goto out;
@@ -811,6 +994,8 @@ int wl1271_plt_start(struct wl1271 *wl)
811 goto out; 994 goto out;
812 } 995 }
813 996
997 wl->bss_type = BSS_TYPE_STA_BSS;
998
814 while (retries) { 999 while (retries) {
815 retries--; 1000 retries--;
816 ret = wl1271_chip_wakeup(wl); 1001 ret = wl1271_chip_wakeup(wl);
@@ -827,11 +1012,10 @@ int wl1271_plt_start(struct wl1271 *wl)
827 1012
828 wl->state = WL1271_STATE_PLT; 1013 wl->state = WL1271_STATE_PLT;
829 wl1271_notice("firmware booted in PLT mode (%s)", 1014 wl1271_notice("firmware booted in PLT mode (%s)",
830 wl->chip.fw_ver); 1015 wl->chip.fw_ver_str);
831 goto out; 1016 goto out;
832 1017
833irq_disable: 1018irq_disable:
834 wl1271_disable_interrupts(wl);
835 mutex_unlock(&wl->mutex); 1019 mutex_unlock(&wl->mutex);
836 /* Unlocking the mutex in the middle of handling is 1020 /* Unlocking the mutex in the middle of handling is
837 inherently unsafe. In this case we deem it safe to do, 1021 inherently unsafe. In this case we deem it safe to do,
@@ -840,7 +1024,9 @@ irq_disable:
840 work function will not do anything.) Also, any other 1024 work function will not do anything.) Also, any other
841 possible concurrent operations will fail due to the 1025 possible concurrent operations will fail due to the
842 current state, hence the wl1271 struct should be safe. */ 1026 current state, hence the wl1271 struct should be safe. */
843 cancel_work_sync(&wl->irq_work); 1027 wl1271_disable_interrupts(wl);
1028 wl1271_flush_deferred_work(wl);
1029 cancel_work_sync(&wl->netstack_work);
844 mutex_lock(&wl->mutex); 1030 mutex_lock(&wl->mutex);
845power_off: 1031power_off:
846 wl1271_power_off(wl); 1032 wl1271_power_off(wl);
@@ -854,12 +1040,10 @@ out:
854 return ret; 1040 return ret;
855} 1041}
856 1042
857int wl1271_plt_stop(struct wl1271 *wl) 1043int __wl1271_plt_stop(struct wl1271 *wl)
858{ 1044{
859 int ret = 0; 1045 int ret = 0;
860 1046
861 mutex_lock(&wl->mutex);
862
863 wl1271_notice("power down"); 1047 wl1271_notice("power down");
864 1048
865 if (wl->state != WL1271_STATE_PLT) { 1049 if (wl->state != WL1271_STATE_PLT) {
@@ -869,70 +1053,46 @@ int wl1271_plt_stop(struct wl1271 *wl)
869 goto out; 1053 goto out;
870 } 1054 }
871 1055
872 wl1271_disable_interrupts(wl);
873 wl1271_power_off(wl); 1056 wl1271_power_off(wl);
874 1057
875 wl->state = WL1271_STATE_OFF; 1058 wl->state = WL1271_STATE_OFF;
876 wl->rx_counter = 0; 1059 wl->rx_counter = 0;
877 1060
878out:
879 mutex_unlock(&wl->mutex); 1061 mutex_unlock(&wl->mutex);
880 1062 wl1271_disable_interrupts(wl);
881 cancel_work_sync(&wl->irq_work); 1063 wl1271_flush_deferred_work(wl);
1064 cancel_work_sync(&wl->netstack_work);
882 cancel_work_sync(&wl->recovery_work); 1065 cancel_work_sync(&wl->recovery_work);
1066 mutex_lock(&wl->mutex);
1067out:
1068 return ret;
1069}
1070
1071int wl1271_plt_stop(struct wl1271 *wl)
1072{
1073 int ret;
883 1074
1075 mutex_lock(&wl->mutex);
1076 ret = __wl1271_plt_stop(wl);
1077 mutex_unlock(&wl->mutex);
884 return ret; 1078 return ret;
885} 1079}
886 1080
887static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1081static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
888{ 1082{
889 struct wl1271 *wl = hw->priv; 1083 struct wl1271 *wl = hw->priv;
890 struct ieee80211_conf *conf = &hw->conf;
891 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
892 struct ieee80211_sta *sta = txinfo->control.sta;
893 unsigned long flags; 1084 unsigned long flags;
894 int q; 1085 int q;
1086 u8 hlid = 0;
895 1087
896 /*
897 * peek into the rates configured in the STA entry.
898 * The rates set after connection stage, The first block only BG sets:
899 * the compare is for bit 0-16 of sta_rate_set. The second block add
900 * HT rates in case of HT supported.
901 */
902 spin_lock_irqsave(&wl->wl_lock, flags);
903 if (sta &&
904 (sta->supp_rates[conf->channel->band] !=
905 (wl->sta_rate_set & HW_BG_RATES_MASK))) {
906 wl->sta_rate_set = sta->supp_rates[conf->channel->band];
907 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
908 }
909
910#ifdef CONFIG_WL12XX_HT
911 if (sta &&
912 sta->ht_cap.ht_supported &&
913 ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
914 sta->ht_cap.mcs.rx_mask[0])) {
915 /* Clean MCS bits before setting them */
916 wl->sta_rate_set &= HW_BG_RATES_MASK;
917 wl->sta_rate_set |=
918 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
919 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
920 }
921#endif
922 wl->tx_queue_count++;
923 spin_unlock_irqrestore(&wl->wl_lock, flags);
924
925 /* queue the packet */
926 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 1088 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
927 skb_queue_tail(&wl->tx_queue[q], skb);
928 1089
929 /* 1090 if (wl->bss_type == BSS_TYPE_AP_BSS)
930 * The chip specific setup must run before the first TX packet - 1091 hlid = wl1271_tx_get_hlid(skb);
931 * before that, the tx_work will not be initialized!
932 */
933 1092
934 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) 1093 spin_lock_irqsave(&wl->wl_lock, flags);
935 ieee80211_queue_work(wl->hw, &wl->tx_work); 1094
1095 wl->tx_queue_count++;
936 1096
937 /* 1097 /*
938 * The workqueue is slow to process the tx_queue and we need stop 1098 * The workqueue is slow to process the tx_queue and we need stop
@@ -940,14 +1100,28 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
940 */ 1100 */
941 if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) { 1101 if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
942 wl1271_debug(DEBUG_TX, "op_tx: stopping queues"); 1102 wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
943
944 spin_lock_irqsave(&wl->wl_lock, flags);
945 ieee80211_stop_queues(wl->hw); 1103 ieee80211_stop_queues(wl->hw);
946 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags); 1104 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
947 spin_unlock_irqrestore(&wl->wl_lock, flags);
948 } 1105 }
949 1106
950 return NETDEV_TX_OK; 1107 /* queue the packet */
1108 if (wl->bss_type == BSS_TYPE_AP_BSS) {
1109 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
1110 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1111 } else {
1112 skb_queue_tail(&wl->tx_queue[q], skb);
1113 }
1114
1115 /*
1116 * The chip specific setup must run before the first TX packet -
1117 * before that, the tx_work will not be initialized!
1118 */
1119
1120 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1121 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1122 ieee80211_queue_work(wl->hw, &wl->tx_work);
1123
1124 spin_unlock_irqrestore(&wl->wl_lock, flags);
951} 1125}
952 1126
953static struct notifier_block wl1271_dev_notifier = { 1127static struct notifier_block wl1271_dev_notifier = {
@@ -967,6 +1141,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
967 * 1141 *
968 * The MAC address is first known when the corresponding interface 1142 * The MAC address is first known when the corresponding interface
969 * is added. That is where we will initialize the hardware. 1143 * is added. That is where we will initialize the hardware.
1144 *
1145 * In addition, we currently have different firmwares for AP and managed
1146 * operation. We will know which to boot according to interface type.
970 */ 1147 */
971 1148
972 return 0; 1149 return 0;
@@ -1006,6 +1183,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1006 wl->bss_type = BSS_TYPE_IBSS; 1183 wl->bss_type = BSS_TYPE_IBSS;
1007 wl->set_bss_type = BSS_TYPE_STA_BSS; 1184 wl->set_bss_type = BSS_TYPE_STA_BSS;
1008 break; 1185 break;
1186 case NL80211_IFTYPE_AP:
1187 wl->bss_type = BSS_TYPE_AP_BSS;
1188 break;
1009 default: 1189 default:
1010 ret = -EOPNOTSUPP; 1190 ret = -EOPNOTSUPP;
1011 goto out; 1191 goto out;
@@ -1038,7 +1218,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1038 break; 1218 break;
1039 1219
1040irq_disable: 1220irq_disable:
1041 wl1271_disable_interrupts(wl);
1042 mutex_unlock(&wl->mutex); 1221 mutex_unlock(&wl->mutex);
1043 /* Unlocking the mutex in the middle of handling is 1222 /* Unlocking the mutex in the middle of handling is
1044 inherently unsafe. In this case we deem it safe to do, 1223 inherently unsafe. In this case we deem it safe to do,
@@ -1047,7 +1226,9 @@ irq_disable:
1047 work function will not do anything.) Also, any other 1226 work function will not do anything.) Also, any other
1048 possible concurrent operations will fail due to the 1227 possible concurrent operations will fail due to the
1049 current state, hence the wl1271 struct should be safe. */ 1228 current state, hence the wl1271 struct should be safe. */
1050 cancel_work_sync(&wl->irq_work); 1229 wl1271_disable_interrupts(wl);
1230 wl1271_flush_deferred_work(wl);
1231 cancel_work_sync(&wl->netstack_work);
1051 mutex_lock(&wl->mutex); 1232 mutex_lock(&wl->mutex);
1052power_off: 1233power_off:
1053 wl1271_power_off(wl); 1234 wl1271_power_off(wl);
@@ -1061,11 +1242,11 @@ power_off:
1061 1242
1062 wl->vif = vif; 1243 wl->vif = vif;
1063 wl->state = WL1271_STATE_ON; 1244 wl->state = WL1271_STATE_ON;
1064 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 1245 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
1065 1246
1066 /* update hw/fw version info in wiphy struct */ 1247 /* update hw/fw version info in wiphy struct */
1067 wiphy->hw_version = wl->chip.id; 1248 wiphy->hw_version = wl->chip.id;
1068 strncpy(wiphy->fw_version, wl->chip.fw_ver, 1249 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1069 sizeof(wiphy->fw_version)); 1250 sizeof(wiphy->fw_version));
1070 1251
1071 /* 1252 /*
@@ -1113,12 +1294,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1113 1294
1114 wl->state = WL1271_STATE_OFF; 1295 wl->state = WL1271_STATE_OFF;
1115 1296
1116 wl1271_disable_interrupts(wl);
1117
1118 mutex_unlock(&wl->mutex); 1297 mutex_unlock(&wl->mutex);
1119 1298
1299 wl1271_disable_interrupts(wl);
1300 wl1271_flush_deferred_work(wl);
1120 cancel_delayed_work_sync(&wl->scan_complete_work); 1301 cancel_delayed_work_sync(&wl->scan_complete_work);
1121 cancel_work_sync(&wl->irq_work); 1302 cancel_work_sync(&wl->netstack_work);
1122 cancel_work_sync(&wl->tx_work); 1303 cancel_work_sync(&wl->tx_work);
1123 cancel_delayed_work_sync(&wl->pspoll_work); 1304 cancel_delayed_work_sync(&wl->pspoll_work);
1124 cancel_delayed_work_sync(&wl->elp_work); 1305 cancel_delayed_work_sync(&wl->elp_work);
@@ -1147,10 +1328,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1147 wl->time_offset = 0; 1328 wl->time_offset = 0;
1148 wl->session_counter = 0; 1329 wl->session_counter = 0;
1149 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 1330 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1150 wl->sta_rate_set = 0;
1151 wl->flags = 0; 1331 wl->flags = 0;
1152 wl->vif = NULL; 1332 wl->vif = NULL;
1153 wl->filters = 0; 1333 wl->filters = 0;
1334 wl1271_free_ap_keys(wl);
1335 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
1336 wl->ap_fw_ps_map = 0;
1337 wl->ap_ps_map = 0;
1154 1338
1155 for (i = 0; i < NUM_TX_QUEUES; i++) 1339 for (i = 0; i < NUM_TX_QUEUES; i++)
1156 wl->tx_blocks_freed[i] = 0; 1340 wl->tx_blocks_freed[i] = 0;
@@ -1186,8 +1370,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1186 1370
1187static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters) 1371static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
1188{ 1372{
1189 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1373 wl1271_set_default_filters(wl);
1190 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1191 1374
1192 /* combine requested filters with current filter config */ 1375 /* combine requested filters with current filter config */
1193 filters = wl->filters | filters; 1376 filters = wl->filters | filters;
@@ -1322,25 +1505,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
1322 wl->basic_rate_set = wl->conf.tx.basic_rate_5; 1505 wl->basic_rate_set = wl->conf.tx.basic_rate_5;
1323} 1506}
1324 1507
1325static u32 wl1271_min_rate_get(struct wl1271 *wl) 1508static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
1326{
1327 int i;
1328 u32 rate = 0;
1329
1330 if (!wl->basic_rate_set) {
1331 WARN_ON(1);
1332 wl->basic_rate_set = wl->conf.tx.basic_rate;
1333 }
1334
1335 for (i = 0; !rate; i++) {
1336 if ((wl->basic_rate_set >> i) & 0x1)
1337 rate = 1 << i;
1338 }
1339
1340 return rate;
1341}
1342
1343static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
1344{ 1509{
1345 int ret; 1510 int ret;
1346 1511
@@ -1350,9 +1515,8 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
1350 if (ret < 0) 1515 if (ret < 0)
1351 goto out; 1516 goto out;
1352 } 1517 }
1353 wl->rate_set = wl1271_min_rate_get(wl); 1518 wl->rate_set = wl1271_tx_min_rate_get(wl);
1354 wl->sta_rate_set = 0; 1519 ret = wl1271_acx_sta_rate_policies(wl);
1355 ret = wl1271_acx_rate_policies(wl);
1356 if (ret < 0) 1520 if (ret < 0)
1357 goto out; 1521 goto out;
1358 ret = wl1271_acx_keep_alive_config( 1522 ret = wl1271_acx_keep_alive_config(
@@ -1381,14 +1545,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1381 struct wl1271 *wl = hw->priv; 1545 struct wl1271 *wl = hw->priv;
1382 struct ieee80211_conf *conf = &hw->conf; 1546 struct ieee80211_conf *conf = &hw->conf;
1383 int channel, ret = 0; 1547 int channel, ret = 0;
1548 bool is_ap;
1384 1549
1385 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 1550 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
1386 1551
1387 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s", 1552 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
1553 " changed 0x%x",
1388 channel, 1554 channel,
1389 conf->flags & IEEE80211_CONF_PS ? "on" : "off", 1555 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
1390 conf->power_level, 1556 conf->power_level,
1391 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use"); 1557 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
1558 changed);
1392 1559
1393 /* 1560 /*
1394 * mac80211 will go to idle nearly immediately after transmitting some 1561 * mac80211 will go to idle nearly immediately after transmitting some
@@ -1406,7 +1573,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1406 goto out; 1573 goto out;
1407 } 1574 }
1408 1575
1409 ret = wl1271_ps_elp_wakeup(wl, false); 1576 is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
1577
1578 ret = wl1271_ps_elp_wakeup(wl);
1410 if (ret < 0) 1579 if (ret < 0)
1411 goto out; 1580 goto out;
1412 1581
@@ -1417,31 +1586,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1417 wl->band = conf->channel->band; 1586 wl->band = conf->channel->band;
1418 wl->channel = channel; 1587 wl->channel = channel;
1419 1588
1420 /* 1589 if (!is_ap) {
1421 * FIXME: the mac80211 should really provide a fixed rate 1590 /*
1422 * to use here. for now, just use the smallest possible rate 1591 * FIXME: the mac80211 should really provide a fixed
1423 * for the band as a fixed rate for association frames and 1592 * rate to use here. for now, just use the smallest
1424 * other control messages. 1593 * possible rate for the band as a fixed rate for
1425 */ 1594 * association frames and other control messages.
1426 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 1595 */
1427 wl1271_set_band_rate(wl); 1596 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
1428 1597 wl1271_set_band_rate(wl);
1429 wl->basic_rate = wl1271_min_rate_get(wl);
1430 ret = wl1271_acx_rate_policies(wl);
1431 if (ret < 0)
1432 wl1271_warning("rate policy for update channel "
1433 "failed %d", ret);
1434 1598
1435 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) { 1599 wl->basic_rate = wl1271_tx_min_rate_get(wl);
1436 ret = wl1271_join(wl, false); 1600 ret = wl1271_acx_sta_rate_policies(wl);
1437 if (ret < 0) 1601 if (ret < 0)
1438 wl1271_warning("cmd join to update channel " 1602 wl1271_warning("rate policy for channel "
1439 "failed %d", ret); 1603 "failed %d", ret);
1604
1605 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1606 ret = wl1271_join(wl, false);
1607 if (ret < 0)
1608 wl1271_warning("cmd join on channel "
1609 "failed %d", ret);
1610 }
1440 } 1611 }
1441 } 1612 }
1442 1613
1443 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1614 if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
1444 ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE); 1615 ret = wl1271_sta_handle_idle(wl,
1616 conf->flags & IEEE80211_CONF_IDLE);
1445 if (ret < 0) 1617 if (ret < 0)
1446 wl1271_warning("idle mode change failed %d", ret); 1618 wl1271_warning("idle mode change failed %d", ret);
1447 } 1619 }
@@ -1548,7 +1720,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
1548 struct wl1271 *wl = hw->priv; 1720 struct wl1271 *wl = hw->priv;
1549 int ret; 1721 int ret;
1550 1722
1551 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter"); 1723 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
1724 " total %x", changed, *total);
1552 1725
1553 mutex_lock(&wl->mutex); 1726 mutex_lock(&wl->mutex);
1554 1727
@@ -1558,19 +1731,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
1558 if (unlikely(wl->state == WL1271_STATE_OFF)) 1731 if (unlikely(wl->state == WL1271_STATE_OFF))
1559 goto out; 1732 goto out;
1560 1733
1561 ret = wl1271_ps_elp_wakeup(wl, false); 1734 ret = wl1271_ps_elp_wakeup(wl);
1562 if (ret < 0) 1735 if (ret < 0)
1563 goto out; 1736 goto out;
1564 1737
1565 1738 if (wl->bss_type != BSS_TYPE_AP_BSS) {
1566 if (*total & FIF_ALLMULTI) 1739 if (*total & FIF_ALLMULTI)
1567 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); 1740 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
1568 else if (fp) 1741 else if (fp)
1569 ret = wl1271_acx_group_address_tbl(wl, fp->enabled, 1742 ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
1570 fp->mc_list, 1743 fp->mc_list,
1571 fp->mc_list_length); 1744 fp->mc_list_length);
1572 if (ret < 0) 1745 if (ret < 0)
1573 goto out_sleep; 1746 goto out_sleep;
1747 }
1574 1748
1575 /* determine, whether supported filter values have changed */ 1749 /* determine, whether supported filter values have changed */
1576 if (changed == 0) 1750 if (changed == 0)
@@ -1593,38 +1767,192 @@ out:
1593 kfree(fp); 1767 kfree(fp);
1594} 1768}
1595 1769
1770static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
1771 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
1772 u16 tx_seq_16)
1773{
1774 struct wl1271_ap_key *ap_key;
1775 int i;
1776
1777 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
1778
1779 if (key_size > MAX_KEY_SIZE)
1780 return -EINVAL;
1781
1782 /*
1783 * Find next free entry in ap_keys. Also check we are not replacing
1784 * an existing key.
1785 */
1786 for (i = 0; i < MAX_NUM_KEYS; i++) {
1787 if (wl->recorded_ap_keys[i] == NULL)
1788 break;
1789
1790 if (wl->recorded_ap_keys[i]->id == id) {
1791 wl1271_warning("trying to record key replacement");
1792 return -EINVAL;
1793 }
1794 }
1795
1796 if (i == MAX_NUM_KEYS)
1797 return -EBUSY;
1798
1799 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
1800 if (!ap_key)
1801 return -ENOMEM;
1802
1803 ap_key->id = id;
1804 ap_key->key_type = key_type;
1805 ap_key->key_size = key_size;
1806 memcpy(ap_key->key, key, key_size);
1807 ap_key->hlid = hlid;
1808 ap_key->tx_seq_32 = tx_seq_32;
1809 ap_key->tx_seq_16 = tx_seq_16;
1810
1811 wl->recorded_ap_keys[i] = ap_key;
1812 return 0;
1813}
1814
1815static void wl1271_free_ap_keys(struct wl1271 *wl)
1816{
1817 int i;
1818
1819 for (i = 0; i < MAX_NUM_KEYS; i++) {
1820 kfree(wl->recorded_ap_keys[i]);
1821 wl->recorded_ap_keys[i] = NULL;
1822 }
1823}
1824
1825static int wl1271_ap_init_hwenc(struct wl1271 *wl)
1826{
1827 int i, ret = 0;
1828 struct wl1271_ap_key *key;
1829 bool wep_key_added = false;
1830
1831 for (i = 0; i < MAX_NUM_KEYS; i++) {
1832 if (wl->recorded_ap_keys[i] == NULL)
1833 break;
1834
1835 key = wl->recorded_ap_keys[i];
1836 ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
1837 key->id, key->key_type,
1838 key->key_size, key->key,
1839 key->hlid, key->tx_seq_32,
1840 key->tx_seq_16);
1841 if (ret < 0)
1842 goto out;
1843
1844 if (key->key_type == KEY_WEP)
1845 wep_key_added = true;
1846 }
1847
1848 if (wep_key_added) {
1849 ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
1850 if (ret < 0)
1851 goto out;
1852 }
1853
1854out:
1855 wl1271_free_ap_keys(wl);
1856 return ret;
1857}
1858
1859static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
1860 u8 key_size, const u8 *key, u32 tx_seq_32,
1861 u16 tx_seq_16, struct ieee80211_sta *sta)
1862{
1863 int ret;
1864 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
1865
1866 if (is_ap) {
1867 struct wl1271_station *wl_sta;
1868 u8 hlid;
1869
1870 if (sta) {
1871 wl_sta = (struct wl1271_station *)sta->drv_priv;
1872 hlid = wl_sta->hlid;
1873 } else {
1874 hlid = WL1271_AP_BROADCAST_HLID;
1875 }
1876
1877 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
1878 /*
1879 * We do not support removing keys after AP shutdown.
1880 * Pretend we do to make mac80211 happy.
1881 */
1882 if (action != KEY_ADD_OR_REPLACE)
1883 return 0;
1884
1885 ret = wl1271_record_ap_key(wl, id,
1886 key_type, key_size,
1887 key, hlid, tx_seq_32,
1888 tx_seq_16);
1889 } else {
1890 ret = wl1271_cmd_set_ap_key(wl, action,
1891 id, key_type, key_size,
1892 key, hlid, tx_seq_32,
1893 tx_seq_16);
1894 }
1895
1896 if (ret < 0)
1897 return ret;
1898 } else {
1899 const u8 *addr;
1900 static const u8 bcast_addr[ETH_ALEN] = {
1901 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1902 };
1903
1904 addr = sta ? sta->addr : bcast_addr;
1905
1906 if (is_zero_ether_addr(addr)) {
1907 /* We dont support TX only encryption */
1908 return -EOPNOTSUPP;
1909 }
1910
1911 /* The wl1271 does not allow to remove unicast keys - they
1912 will be cleared automatically on next CMD_JOIN. Ignore the
1913 request silently, as we dont want the mac80211 to emit
1914 an error message. */
1915 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
1916 return 0;
1917
1918 ret = wl1271_cmd_set_sta_key(wl, action,
1919 id, key_type, key_size,
1920 key, addr, tx_seq_32,
1921 tx_seq_16);
1922 if (ret < 0)
1923 return ret;
1924
1925 /* the default WEP key needs to be configured at least once */
1926 if (key_type == KEY_WEP) {
1927 ret = wl1271_cmd_set_sta_default_wep_key(wl,
1928 wl->default_key);
1929 if (ret < 0)
1930 return ret;
1931 }
1932 }
1933
1934 return 0;
1935}
1936
1596static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1937static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1597 struct ieee80211_vif *vif, 1938 struct ieee80211_vif *vif,
1598 struct ieee80211_sta *sta, 1939 struct ieee80211_sta *sta,
1599 struct ieee80211_key_conf *key_conf) 1940 struct ieee80211_key_conf *key_conf)
1600{ 1941{
1601 struct wl1271 *wl = hw->priv; 1942 struct wl1271 *wl = hw->priv;
1602 const u8 *addr;
1603 int ret; 1943 int ret;
1604 u32 tx_seq_32 = 0; 1944 u32 tx_seq_32 = 0;
1605 u16 tx_seq_16 = 0; 1945 u16 tx_seq_16 = 0;
1606 u8 key_type; 1946 u8 key_type;
1607 1947
1608 static const u8 bcast_addr[ETH_ALEN] =
1609 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1610
1611 wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); 1948 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
1612 1949
1613 addr = sta ? sta->addr : bcast_addr; 1950 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
1614
1615 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
1616 wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
1617 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", 1951 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
1618 key_conf->cipher, key_conf->keyidx, 1952 key_conf->cipher, key_conf->keyidx,
1619 key_conf->keylen, key_conf->flags); 1953 key_conf->keylen, key_conf->flags);
1620 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 1954 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
1621 1955
1622 if (is_zero_ether_addr(addr)) {
1623 /* We dont support TX only encryption */
1624 ret = -EOPNOTSUPP;
1625 goto out;
1626 }
1627
1628 mutex_lock(&wl->mutex); 1956 mutex_lock(&wl->mutex);
1629 1957
1630 if (unlikely(wl->state == WL1271_STATE_OFF)) { 1958 if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -1632,7 +1960,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1632 goto out_unlock; 1960 goto out_unlock;
1633 } 1961 }
1634 1962
1635 ret = wl1271_ps_elp_wakeup(wl, false); 1963 ret = wl1271_ps_elp_wakeup(wl);
1636 if (ret < 0) 1964 if (ret < 0)
1637 goto out_unlock; 1965 goto out_unlock;
1638 1966
@@ -1671,36 +1999,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1671 1999
1672 switch (cmd) { 2000 switch (cmd) {
1673 case SET_KEY: 2001 case SET_KEY:
1674 ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE, 2002 ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
1675 key_conf->keyidx, key_type, 2003 key_conf->keyidx, key_type,
1676 key_conf->keylen, key_conf->key, 2004 key_conf->keylen, key_conf->key,
1677 addr, tx_seq_32, tx_seq_16); 2005 tx_seq_32, tx_seq_16, sta);
1678 if (ret < 0) { 2006 if (ret < 0) {
1679 wl1271_error("Could not add or replace key"); 2007 wl1271_error("Could not add or replace key");
1680 goto out_sleep; 2008 goto out_sleep;
1681 } 2009 }
1682
1683 /* the default WEP key needs to be configured at least once */
1684 if (key_type == KEY_WEP) {
1685 ret = wl1271_cmd_set_default_wep_key(wl,
1686 wl->default_key);
1687 if (ret < 0)
1688 goto out_sleep;
1689 }
1690 break; 2010 break;
1691 2011
1692 case DISABLE_KEY: 2012 case DISABLE_KEY:
1693 /* The wl1271 does not allow to remove unicast keys - they 2013 ret = wl1271_set_key(wl, KEY_REMOVE,
1694 will be cleared automatically on next CMD_JOIN. Ignore the 2014 key_conf->keyidx, key_type,
1695 request silently, as we dont want the mac80211 to emit 2015 key_conf->keylen, key_conf->key,
1696 an error message. */ 2016 0, 0, sta);
1697 if (!is_broadcast_ether_addr(addr))
1698 break;
1699
1700 ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
1701 key_conf->keyidx, key_type,
1702 key_conf->keylen, key_conf->key,
1703 addr, 0, 0);
1704 if (ret < 0) { 2017 if (ret < 0) {
1705 wl1271_error("Could not remove key"); 2018 wl1271_error("Could not remove key");
1706 goto out_sleep; 2019 goto out_sleep;
@@ -1719,7 +2032,6 @@ out_sleep:
1719out_unlock: 2032out_unlock:
1720 mutex_unlock(&wl->mutex); 2033 mutex_unlock(&wl->mutex);
1721 2034
1722out:
1723 return ret; 2035 return ret;
1724} 2036}
1725 2037
@@ -1751,7 +2063,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1751 goto out; 2063 goto out;
1752 } 2064 }
1753 2065
1754 ret = wl1271_ps_elp_wakeup(wl, false); 2066 ret = wl1271_ps_elp_wakeup(wl);
1755 if (ret < 0) 2067 if (ret < 0)
1756 goto out; 2068 goto out;
1757 2069
@@ -1777,7 +2089,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
1777 goto out; 2089 goto out;
1778 } 2090 }
1779 2091
1780 ret = wl1271_ps_elp_wakeup(wl, false); 2092 ret = wl1271_ps_elp_wakeup(wl);
1781 if (ret < 0) 2093 if (ret < 0)
1782 goto out; 2094 goto out;
1783 2095
@@ -1805,7 +2117,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1805 goto out; 2117 goto out;
1806 } 2118 }
1807 2119
1808 ret = wl1271_ps_elp_wakeup(wl, false); 2120 ret = wl1271_ps_elp_wakeup(wl);
1809 if (ret < 0) 2121 if (ret < 0)
1810 goto out; 2122 goto out;
1811 2123
@@ -1821,7 +2133,7 @@ out:
1821 return ret; 2133 return ret;
1822} 2134}
1823 2135
1824static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, 2136static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
1825 int offset) 2137 int offset)
1826{ 2138{
1827 u8 *ptr = skb->data + offset; 2139 u8 *ptr = skb->data + offset;
@@ -1831,89 +2143,213 @@ static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
1831 if (ptr[0] == WLAN_EID_SSID) { 2143 if (ptr[0] == WLAN_EID_SSID) {
1832 wl->ssid_len = ptr[1]; 2144 wl->ssid_len = ptr[1];
1833 memcpy(wl->ssid, ptr+2, wl->ssid_len); 2145 memcpy(wl->ssid, ptr+2, wl->ssid_len);
1834 return; 2146 return 0;
1835 } 2147 }
1836 ptr += (ptr[1] + 2); 2148 ptr += (ptr[1] + 2);
1837 } 2149 }
2150
1838 wl1271_error("No SSID in IEs!\n"); 2151 wl1271_error("No SSID in IEs!\n");
2152 return -ENOENT;
1839} 2153}
1840 2154
1841static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 2155static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
1842 struct ieee80211_vif *vif,
1843 struct ieee80211_bss_conf *bss_conf, 2156 struct ieee80211_bss_conf *bss_conf,
1844 u32 changed) 2157 u32 changed)
1845{ 2158{
1846 enum wl1271_cmd_ps_mode mode; 2159 int ret = 0;
1847 struct wl1271 *wl = hw->priv;
1848 struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
1849 bool do_join = false;
1850 bool set_assoc = false;
1851 int ret;
1852 2160
1853 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 2161 if (changed & BSS_CHANGED_ERP_SLOT) {
2162 if (bss_conf->use_short_slot)
2163 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
2164 else
2165 ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
2166 if (ret < 0) {
2167 wl1271_warning("Set slot time failed %d", ret);
2168 goto out;
2169 }
2170 }
1854 2171
1855 mutex_lock(&wl->mutex); 2172 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2173 if (bss_conf->use_short_preamble)
2174 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
2175 else
2176 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
2177 }
1856 2178
1857 if (unlikely(wl->state == WL1271_STATE_OFF)) 2179 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1858 goto out; 2180 if (bss_conf->use_cts_prot)
2181 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
2182 else
2183 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
2184 if (ret < 0) {
2185 wl1271_warning("Set ctsprotect failed %d", ret);
2186 goto out;
2187 }
2188 }
1859 2189
1860 ret = wl1271_ps_elp_wakeup(wl, false); 2190out:
1861 if (ret < 0) 2191 return ret;
1862 goto out; 2192}
2193
2194static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
2195 struct ieee80211_vif *vif,
2196 struct ieee80211_bss_conf *bss_conf,
2197 u32 changed)
2198{
2199 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
2200 int ret = 0;
1863 2201
1864 if ((changed & BSS_CHANGED_BEACON_INT) && 2202 if ((changed & BSS_CHANGED_BEACON_INT)) {
1865 (wl->bss_type == BSS_TYPE_IBSS)) { 2203 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
1866 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
1867 bss_conf->beacon_int); 2204 bss_conf->beacon_int);
1868 2205
1869 wl->beacon_int = bss_conf->beacon_int; 2206 wl->beacon_int = bss_conf->beacon_int;
1870 do_join = true;
1871 } 2207 }
1872 2208
1873 if ((changed & BSS_CHANGED_BEACON) && 2209 if ((changed & BSS_CHANGED_BEACON)) {
1874 (wl->bss_type == BSS_TYPE_IBSS)) { 2210 struct ieee80211_hdr *hdr;
1875 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2211 int ieoffset = offsetof(struct ieee80211_mgmt,
2212 u.beacon.variable);
2213 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
2214 u16 tmpl_id;
1876 2215
1877 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated"); 2216 if (!beacon)
2217 goto out;
1878 2218
1879 if (beacon) { 2219 wl1271_debug(DEBUG_MASTER, "beacon updated");
1880 struct ieee80211_hdr *hdr;
1881 int ieoffset = offsetof(struct ieee80211_mgmt,
1882 u.beacon.variable);
1883 2220
1884 wl1271_ssid_set(wl, beacon, ieoffset); 2221 ret = wl1271_ssid_set(wl, beacon, ieoffset);
2222 if (ret < 0) {
2223 dev_kfree_skb(beacon);
2224 goto out;
2225 }
2226 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
2227 CMD_TEMPL_BEACON;
2228 ret = wl1271_cmd_template_set(wl, tmpl_id,
2229 beacon->data,
2230 beacon->len, 0,
2231 wl1271_tx_min_rate_get(wl));
2232 if (ret < 0) {
2233 dev_kfree_skb(beacon);
2234 goto out;
2235 }
1885 2236
1886 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 2237 hdr = (struct ieee80211_hdr *) beacon->data;
1887 beacon->data, 2238 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1888 beacon->len, 0, 2239 IEEE80211_STYPE_PROBE_RESP);
1889 wl1271_min_rate_get(wl)); 2240
2241 tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
2242 CMD_TEMPL_PROBE_RESPONSE;
2243 ret = wl1271_cmd_template_set(wl,
2244 tmpl_id,
2245 beacon->data,
2246 beacon->len, 0,
2247 wl1271_tx_min_rate_get(wl));
2248 dev_kfree_skb(beacon);
2249 if (ret < 0)
2250 goto out;
2251 }
1890 2252
1891 if (ret < 0) { 2253out:
1892 dev_kfree_skb(beacon); 2254 return ret;
1893 goto out_sleep; 2255}
1894 }
1895 2256
1896 hdr = (struct ieee80211_hdr *) beacon->data; 2257/* AP mode changes */
1897 hdr->frame_control = cpu_to_le16( 2258static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
1898 IEEE80211_FTYPE_MGMT | 2259 struct ieee80211_vif *vif,
1899 IEEE80211_STYPE_PROBE_RESP); 2260 struct ieee80211_bss_conf *bss_conf,
2261 u32 changed)
2262{
2263 int ret = 0;
1900 2264
1901 ret = wl1271_cmd_template_set(wl, 2265 if ((changed & BSS_CHANGED_BASIC_RATES)) {
1902 CMD_TEMPL_PROBE_RESPONSE, 2266 u32 rates = bss_conf->basic_rates;
1903 beacon->data, 2267 struct conf_tx_rate_class mgmt_rc;
1904 beacon->len, 0, 2268
1905 wl1271_min_rate_get(wl)); 2269 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
1906 dev_kfree_skb(beacon); 2270 wl->basic_rate = wl1271_tx_min_rate_get(wl);
1907 if (ret < 0) 2271 wl1271_debug(DEBUG_AP, "basic rates: 0x%x",
1908 goto out_sleep; 2272 wl->basic_rate_set);
2273
2274 /* update the AP management rate policy with the new rates */
2275 mgmt_rc.enabled_rates = wl->basic_rate_set;
2276 mgmt_rc.long_retry_limit = 10;
2277 mgmt_rc.short_retry_limit = 10;
2278 mgmt_rc.aflags = 0;
2279 ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
2280 ACX_TX_AP_MODE_MGMT_RATE);
2281 if (ret < 0) {
2282 wl1271_error("AP mgmt policy change failed %d", ret);
2283 goto out;
2284 }
2285 }
1909 2286
1910 /* Need to update the SSID (for filtering etc) */ 2287 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
1911 do_join = true; 2288 if (ret < 0)
2289 goto out;
2290
2291 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
2292 if (bss_conf->enable_beacon) {
2293 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
2294 ret = wl1271_cmd_start_bss(wl);
2295 if (ret < 0)
2296 goto out;
2297
2298 set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
2299 wl1271_debug(DEBUG_AP, "started AP");
2300
2301 ret = wl1271_ap_init_hwenc(wl);
2302 if (ret < 0)
2303 goto out;
2304 }
2305 } else {
2306 if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
2307 ret = wl1271_cmd_stop_bss(wl);
2308 if (ret < 0)
2309 goto out;
2310
2311 clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
2312 wl1271_debug(DEBUG_AP, "stopped AP");
2313 }
1912 } 2314 }
1913 } 2315 }
1914 2316
1915 if ((changed & BSS_CHANGED_BEACON_ENABLED) && 2317 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
1916 (wl->bss_type == BSS_TYPE_IBSS)) { 2318 if (ret < 0)
2319 goto out;
2320out:
2321 return;
2322}
2323
2324/* STA/IBSS mode changes */
2325static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
2326 struct ieee80211_vif *vif,
2327 struct ieee80211_bss_conf *bss_conf,
2328 u32 changed)
2329{
2330 bool do_join = false, set_assoc = false;
2331 bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
2332 u32 sta_rate_set = 0;
2333 int ret;
2334 struct ieee80211_sta *sta;
2335 bool sta_exists = false;
2336 struct ieee80211_sta_ht_cap sta_ht_cap;
2337
2338 if (is_ibss) {
2339 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
2340 changed);
2341 if (ret < 0)
2342 goto out;
2343 }
2344
2345 if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss)
2346 do_join = true;
2347
2348 /* Need to update the SSID (for filtering etc) */
2349 if ((changed & BSS_CHANGED_BEACON) && is_ibss)
2350 do_join = true;
2351
2352 if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
1917 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", 2353 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
1918 bss_conf->enable_beacon ? "enabled" : "disabled"); 2354 bss_conf->enable_beacon ? "enabled" : "disabled");
1919 2355
@@ -1924,7 +2360,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1924 do_join = true; 2360 do_join = true;
1925 } 2361 }
1926 2362
1927 if (changed & BSS_CHANGED_CQM) { 2363 if ((changed & BSS_CHANGED_CQM)) {
1928 bool enable = false; 2364 bool enable = false;
1929 if (bss_conf->cqm_rssi_thold) 2365 if (bss_conf->cqm_rssi_thold)
1930 enable = true; 2366 enable = true;
@@ -1942,24 +2378,70 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1942 * and enable the BSSID filter 2378 * and enable the BSSID filter
1943 */ 2379 */
1944 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) { 2380 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
1945 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 2381 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1946 2382
2383 if (!is_zero_ether_addr(wl->bssid)) {
1947 ret = wl1271_cmd_build_null_data(wl); 2384 ret = wl1271_cmd_build_null_data(wl);
1948 if (ret < 0) 2385 if (ret < 0)
1949 goto out_sleep; 2386 goto out;
1950 2387
1951 ret = wl1271_build_qos_null_data(wl); 2388 ret = wl1271_build_qos_null_data(wl);
1952 if (ret < 0) 2389 if (ret < 0)
1953 goto out_sleep; 2390 goto out;
1954 2391
1955 /* filter out all packets not from this BSSID */ 2392 /* filter out all packets not from this BSSID */
1956 wl1271_configure_filters(wl, 0); 2393 wl1271_configure_filters(wl, 0);
1957 2394
1958 /* Need to update the BSSID (for filtering etc) */ 2395 /* Need to update the BSSID (for filtering etc) */
1959 do_join = true; 2396 do_join = true;
2397 }
1960 } 2398 }
1961 2399
1962 if (changed & BSS_CHANGED_ASSOC) { 2400 rcu_read_lock();
2401 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2402 if (sta) {
2403 /* save the supp_rates of the ap */
2404 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
2405 if (sta->ht_cap.ht_supported)
2406 sta_rate_set |=
2407 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
2408 sta_ht_cap = sta->ht_cap;
2409 sta_exists = true;
2410 }
2411 rcu_read_unlock();
2412
2413 if (sta_exists) {
2414 /* handle new association with HT and HT information change */
2415 if ((changed & BSS_CHANGED_HT) &&
2416 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
2417 ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
2418 true);
2419 if (ret < 0) {
2420 wl1271_warning("Set ht cap true failed %d",
2421 ret);
2422 goto out;
2423 }
2424 ret = wl1271_acx_set_ht_information(wl,
2425 bss_conf->ht_operation_mode);
2426 if (ret < 0) {
2427 wl1271_warning("Set ht information failed %d",
2428 ret);
2429 goto out;
2430 }
2431 }
2432 /* handle new association without HT and disassociation */
2433 else if (changed & BSS_CHANGED_ASSOC) {
2434 ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
2435 false);
2436 if (ret < 0) {
2437 wl1271_warning("Set ht cap false failed %d",
2438 ret);
2439 goto out;
2440 }
2441 }
2442 }
2443
2444 if ((changed & BSS_CHANGED_ASSOC)) {
1963 if (bss_conf->assoc) { 2445 if (bss_conf->assoc) {
1964 u32 rates; 2446 u32 rates;
1965 int ieoffset; 2447 int ieoffset;
@@ -1975,10 +2457,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1975 rates = bss_conf->basic_rates; 2457 rates = bss_conf->basic_rates;
1976 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, 2458 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
1977 rates); 2459 rates);
1978 wl->basic_rate = wl1271_min_rate_get(wl); 2460 wl->basic_rate = wl1271_tx_min_rate_get(wl);
1979 ret = wl1271_acx_rate_policies(wl); 2461 if (sta_rate_set)
2462 wl->rate_set = wl1271_tx_enabled_rates_get(wl,
2463 sta_rate_set);
2464 ret = wl1271_acx_sta_rate_policies(wl);
1980 if (ret < 0) 2465 if (ret < 0)
1981 goto out_sleep; 2466 goto out;
1982 2467
1983 /* 2468 /*
1984 * with wl1271, we don't need to update the 2469 * with wl1271, we don't need to update the
@@ -1988,7 +2473,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1988 */ 2473 */
1989 ret = wl1271_cmd_build_ps_poll(wl, wl->aid); 2474 ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
1990 if (ret < 0) 2475 if (ret < 0)
1991 goto out_sleep; 2476 goto out;
1992 2477
1993 /* 2478 /*
1994 * Get a template for hardware connection maintenance 2479 * Get a template for hardware connection maintenance
@@ -2002,17 +2487,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2002 /* enable the connection monitoring feature */ 2487 /* enable the connection monitoring feature */
2003 ret = wl1271_acx_conn_monit_params(wl, true); 2488 ret = wl1271_acx_conn_monit_params(wl, true);
2004 if (ret < 0) 2489 if (ret < 0)
2005 goto out_sleep; 2490 goto out;
2006 2491
2007 /* If we want to go in PSM but we're not there yet */ 2492 /* If we want to go in PSM but we're not there yet */
2008 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) && 2493 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
2009 !test_bit(WL1271_FLAG_PSM, &wl->flags)) { 2494 !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
2495 enum wl1271_cmd_ps_mode mode;
2496
2010 mode = STATION_POWER_SAVE_MODE; 2497 mode = STATION_POWER_SAVE_MODE;
2011 ret = wl1271_ps_set_mode(wl, mode, 2498 ret = wl1271_ps_set_mode(wl, mode,
2012 wl->basic_rate, 2499 wl->basic_rate,
2013 true); 2500 true);
2014 if (ret < 0) 2501 if (ret < 0)
2015 goto out_sleep; 2502 goto out;
2016 } 2503 }
2017 } else { 2504 } else {
2018 /* use defaults when not associated */ 2505 /* use defaults when not associated */
@@ -2029,10 +2516,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2029 2516
2030 /* revert back to minimum rates for the current band */ 2517 /* revert back to minimum rates for the current band */
2031 wl1271_set_band_rate(wl); 2518 wl1271_set_band_rate(wl);
2032 wl->basic_rate = wl1271_min_rate_get(wl); 2519 wl->basic_rate = wl1271_tx_min_rate_get(wl);
2033 ret = wl1271_acx_rate_policies(wl); 2520 ret = wl1271_acx_sta_rate_policies(wl);
2034 if (ret < 0) 2521 if (ret < 0)
2035 goto out_sleep; 2522 goto out;
2036 2523
2037 /* disable connection monitor features */ 2524 /* disable connection monitor features */
2038 ret = wl1271_acx_conn_monit_params(wl, false); 2525 ret = wl1271_acx_conn_monit_params(wl, false);
@@ -2040,74 +2527,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2040 /* Disable the keep-alive feature */ 2527 /* Disable the keep-alive feature */
2041 ret = wl1271_acx_keep_alive_mode(wl, false); 2528 ret = wl1271_acx_keep_alive_mode(wl, false);
2042 if (ret < 0) 2529 if (ret < 0)
2043 goto out_sleep; 2530 goto out;
2044 2531
2045 /* restore the bssid filter and go to dummy bssid */ 2532 /* restore the bssid filter and go to dummy bssid */
2046 wl1271_unjoin(wl); 2533 wl1271_unjoin(wl);
2047 wl1271_dummy_join(wl); 2534 wl1271_dummy_join(wl);
2048 } 2535 }
2049
2050 }
2051
2052 if (changed & BSS_CHANGED_ERP_SLOT) {
2053 if (bss_conf->use_short_slot)
2054 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
2055 else
2056 ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
2057 if (ret < 0) {
2058 wl1271_warning("Set slot time failed %d", ret);
2059 goto out_sleep;
2060 }
2061 } 2536 }
2062 2537
2063 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2538 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
2064 if (bss_conf->use_short_preamble) 2539 if (ret < 0)
2065 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); 2540 goto out;
2066 else
2067 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
2068 }
2069
2070 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2071 if (bss_conf->use_cts_prot)
2072 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
2073 else
2074 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
2075 if (ret < 0) {
2076 wl1271_warning("Set ctsprotect failed %d", ret);
2077 goto out_sleep;
2078 }
2079 }
2080
2081 /*
2082 * Takes care of: New association with HT enable,
2083 * HT information change in beacon.
2084 */
2085 if (sta &&
2086 (changed & BSS_CHANGED_HT) &&
2087 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
2088 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
2089 if (ret < 0) {
2090 wl1271_warning("Set ht cap true failed %d", ret);
2091 goto out_sleep;
2092 }
2093 ret = wl1271_acx_set_ht_information(wl,
2094 bss_conf->ht_operation_mode);
2095 if (ret < 0) {
2096 wl1271_warning("Set ht information failed %d", ret);
2097 goto out_sleep;
2098 }
2099 }
2100 /*
2101 * Takes care of: New association without HT,
2102 * Disassociation.
2103 */
2104 else if (sta && (changed & BSS_CHANGED_ASSOC)) {
2105 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
2106 if (ret < 0) {
2107 wl1271_warning("Set ht cap false failed %d", ret);
2108 goto out_sleep;
2109 }
2110 }
2111 2541
2112 if (changed & BSS_CHANGED_ARP_FILTER) { 2542 if (changed & BSS_CHANGED_ARP_FILTER) {
2113 __be32 addr = bss_conf->arp_addr_list[0]; 2543 __be32 addr = bss_conf->arp_addr_list[0];
@@ -2124,29 +2554,57 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2124 ret = wl1271_cmd_build_arp_rsp(wl, addr); 2554 ret = wl1271_cmd_build_arp_rsp(wl, addr);
2125 if (ret < 0) { 2555 if (ret < 0) {
2126 wl1271_warning("build arp rsp failed: %d", ret); 2556 wl1271_warning("build arp rsp failed: %d", ret);
2127 goto out_sleep; 2557 goto out;
2128 } 2558 }
2129 2559
2130 ret = wl1271_acx_arp_ip_filter(wl, 2560 ret = wl1271_acx_arp_ip_filter(wl,
2131 (ACX_ARP_FILTER_ARP_FILTERING | 2561 ACX_ARP_FILTER_ARP_FILTERING,
2132 ACX_ARP_FILTER_AUTO_ARP),
2133 addr); 2562 addr);
2134 } else 2563 } else
2135 ret = wl1271_acx_arp_ip_filter(wl, 0, addr); 2564 ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
2136 2565
2137 if (ret < 0) 2566 if (ret < 0)
2138 goto out_sleep; 2567 goto out;
2139 } 2568 }
2140 2569
2141 if (do_join) { 2570 if (do_join) {
2142 ret = wl1271_join(wl, set_assoc); 2571 ret = wl1271_join(wl, set_assoc);
2143 if (ret < 0) { 2572 if (ret < 0) {
2144 wl1271_warning("cmd join failed %d", ret); 2573 wl1271_warning("cmd join failed %d", ret);
2145 goto out_sleep; 2574 goto out;
2146 } 2575 }
2147 } 2576 }
2148 2577
2149out_sleep: 2578out:
2579 return;
2580}
2581
2582static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2583 struct ieee80211_vif *vif,
2584 struct ieee80211_bss_conf *bss_conf,
2585 u32 changed)
2586{
2587 struct wl1271 *wl = hw->priv;
2588 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
2589 int ret;
2590
2591 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
2592 (int)changed);
2593
2594 mutex_lock(&wl->mutex);
2595
2596 if (unlikely(wl->state == WL1271_STATE_OFF))
2597 goto out;
2598
2599 ret = wl1271_ps_elp_wakeup(wl);
2600 if (ret < 0)
2601 goto out;
2602
2603 if (is_ap)
2604 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
2605 else
2606 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
2607
2150 wl1271_ps_elp_sleep(wl); 2608 wl1271_ps_elp_sleep(wl);
2151 2609
2152out: 2610out:
@@ -2158,42 +2616,66 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
2158{ 2616{
2159 struct wl1271 *wl = hw->priv; 2617 struct wl1271 *wl = hw->priv;
2160 u8 ps_scheme; 2618 u8 ps_scheme;
2161 int ret; 2619 int ret = 0;
2162 2620
2163 mutex_lock(&wl->mutex); 2621 mutex_lock(&wl->mutex);
2164 2622
2165 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); 2623 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
2166 2624
2167 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2168 ret = -EAGAIN;
2169 goto out;
2170 }
2171
2172 ret = wl1271_ps_elp_wakeup(wl, false);
2173 if (ret < 0)
2174 goto out;
2175
2176 /* the txop is confed in units of 32us by the mac80211, we need us */
2177 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
2178 params->cw_min, params->cw_max,
2179 params->aifs, params->txop << 5);
2180 if (ret < 0)
2181 goto out_sleep;
2182
2183 if (params->uapsd) 2625 if (params->uapsd)
2184 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER; 2626 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
2185 else 2627 else
2186 ps_scheme = CONF_PS_SCHEME_LEGACY; 2628 ps_scheme = CONF_PS_SCHEME_LEGACY;
2187 2629
2188 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), 2630 if (wl->state == WL1271_STATE_OFF) {
2189 CONF_CHANNEL_TYPE_EDCF, 2631 /*
2190 wl1271_tx_get_queue(queue), 2632 * If the state is off, the parameters will be recorded and
2191 ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0); 2633 * configured on init. This happens in AP-mode.
2192 if (ret < 0) 2634 */
2193 goto out_sleep; 2635 struct conf_tx_ac_category *conf_ac =
2636 &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
2637 struct conf_tx_tid *conf_tid =
2638 &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
2639
2640 conf_ac->ac = wl1271_tx_get_queue(queue);
2641 conf_ac->cw_min = (u8)params->cw_min;
2642 conf_ac->cw_max = params->cw_max;
2643 conf_ac->aifsn = params->aifs;
2644 conf_ac->tx_op_limit = params->txop << 5;
2645
2646 conf_tid->queue_id = wl1271_tx_get_queue(queue);
2647 conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
2648 conf_tid->tsid = wl1271_tx_get_queue(queue);
2649 conf_tid->ps_scheme = ps_scheme;
2650 conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
2651 conf_tid->apsd_conf[0] = 0;
2652 conf_tid->apsd_conf[1] = 0;
2653 } else {
2654 ret = wl1271_ps_elp_wakeup(wl);
2655 if (ret < 0)
2656 goto out;
2657
2658 /*
2659 * the txop is confed in units of 32us by the mac80211,
2660 * we need us
2661 */
2662 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
2663 params->cw_min, params->cw_max,
2664 params->aifs, params->txop << 5);
2665 if (ret < 0)
2666 goto out_sleep;
2667
2668 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
2669 CONF_CHANNEL_TYPE_EDCF,
2670 wl1271_tx_get_queue(queue),
2671 ps_scheme, CONF_ACK_POLICY_LEGACY,
2672 0, 0);
2673 if (ret < 0)
2674 goto out_sleep;
2194 2675
2195out_sleep: 2676out_sleep:
2196 wl1271_ps_elp_sleep(wl); 2677 wl1271_ps_elp_sleep(wl);
2678 }
2197 2679
2198out: 2680out:
2199 mutex_unlock(&wl->mutex); 2681 mutex_unlock(&wl->mutex);
@@ -2215,7 +2697,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
2215 if (unlikely(wl->state == WL1271_STATE_OFF)) 2697 if (unlikely(wl->state == WL1271_STATE_OFF))
2216 goto out; 2698 goto out;
2217 2699
2218 ret = wl1271_ps_elp_wakeup(wl, false); 2700 ret = wl1271_ps_elp_wakeup(wl);
2219 if (ret < 0) 2701 if (ret < 0)
2220 goto out; 2702 goto out;
2221 2703
@@ -2247,6 +2729,184 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
2247 return 0; 2729 return 0;
2248} 2730}
2249 2731
2732static int wl1271_allocate_sta(struct wl1271 *wl,
2733 struct ieee80211_sta *sta,
2734 u8 *hlid)
2735{
2736 struct wl1271_station *wl_sta;
2737 int id;
2738
2739 id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
2740 if (id >= AP_MAX_STATIONS) {
2741 wl1271_warning("could not allocate HLID - too much stations");
2742 return -EBUSY;
2743 }
2744
2745 wl_sta = (struct wl1271_station *)sta->drv_priv;
2746 __set_bit(id, wl->ap_hlid_map);
2747 wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
2748 *hlid = wl_sta->hlid;
2749 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
2750 return 0;
2751}
2752
2753static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
2754{
2755 int id = hlid - WL1271_AP_STA_HLID_START;
2756
2757 if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
2758 return;
2759
2760 __clear_bit(id, wl->ap_hlid_map);
2761 memset(wl->links[hlid].addr, 0, ETH_ALEN);
2762 wl1271_tx_reset_link_queues(wl, hlid);
2763 __clear_bit(hlid, &wl->ap_ps_map);
2764 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
2765}
2766
2767static int wl1271_op_sta_add(struct ieee80211_hw *hw,
2768 struct ieee80211_vif *vif,
2769 struct ieee80211_sta *sta)
2770{
2771 struct wl1271 *wl = hw->priv;
2772 int ret = 0;
2773 u8 hlid;
2774
2775 mutex_lock(&wl->mutex);
2776
2777 if (unlikely(wl->state == WL1271_STATE_OFF))
2778 goto out;
2779
2780 if (wl->bss_type != BSS_TYPE_AP_BSS)
2781 goto out;
2782
2783 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
2784
2785 ret = wl1271_allocate_sta(wl, sta, &hlid);
2786 if (ret < 0)
2787 goto out;
2788
2789 ret = wl1271_ps_elp_wakeup(wl);
2790 if (ret < 0)
2791 goto out_free_sta;
2792
2793 ret = wl1271_cmd_add_sta(wl, sta, hlid);
2794 if (ret < 0)
2795 goto out_sleep;
2796
2797out_sleep:
2798 wl1271_ps_elp_sleep(wl);
2799
2800out_free_sta:
2801 if (ret < 0)
2802 wl1271_free_sta(wl, hlid);
2803
2804out:
2805 mutex_unlock(&wl->mutex);
2806 return ret;
2807}
2808
2809static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
2810 struct ieee80211_vif *vif,
2811 struct ieee80211_sta *sta)
2812{
2813 struct wl1271 *wl = hw->priv;
2814 struct wl1271_station *wl_sta;
2815 int ret = 0, id;
2816
2817 mutex_lock(&wl->mutex);
2818
2819 if (unlikely(wl->state == WL1271_STATE_OFF))
2820 goto out;
2821
2822 if (wl->bss_type != BSS_TYPE_AP_BSS)
2823 goto out;
2824
2825 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
2826
2827 wl_sta = (struct wl1271_station *)sta->drv_priv;
2828 id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
2829 if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
2830 goto out;
2831
2832 ret = wl1271_ps_elp_wakeup(wl);
2833 if (ret < 0)
2834 goto out;
2835
2836 ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
2837 if (ret < 0)
2838 goto out_sleep;
2839
2840 wl1271_free_sta(wl, wl_sta->hlid);
2841
2842out_sleep:
2843 wl1271_ps_elp_sleep(wl);
2844
2845out:
2846 mutex_unlock(&wl->mutex);
2847 return ret;
2848}
2849
2850int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2851 enum ieee80211_ampdu_mlme_action action,
2852 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2853 u8 buf_size)
2854{
2855 struct wl1271 *wl = hw->priv;
2856 int ret;
2857
2858 mutex_lock(&wl->mutex);
2859
2860 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2861 ret = -EAGAIN;
2862 goto out;
2863 }
2864
2865 ret = wl1271_ps_elp_wakeup(wl);
2866 if (ret < 0)
2867 goto out;
2868
2869 switch (action) {
2870 case IEEE80211_AMPDU_RX_START:
2871 if (wl->ba_support) {
2872 ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
2873 true);
2874 if (!ret)
2875 wl->ba_rx_bitmap |= BIT(tid);
2876 } else {
2877 ret = -ENOTSUPP;
2878 }
2879 break;
2880
2881 case IEEE80211_AMPDU_RX_STOP:
2882 ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
2883 if (!ret)
2884 wl->ba_rx_bitmap &= ~BIT(tid);
2885 break;
2886
2887 /*
2888 * The BA initiator session management in FW independently.
2889 * Falling break here on purpose for all TX APDU commands.
2890 */
2891 case IEEE80211_AMPDU_TX_START:
2892 case IEEE80211_AMPDU_TX_STOP:
2893 case IEEE80211_AMPDU_TX_OPERATIONAL:
2894 ret = -EINVAL;
2895 break;
2896
2897 default:
2898 wl1271_error("Incorrect ampdu action id=%x\n", action);
2899 ret = -EINVAL;
2900 }
2901
2902 wl1271_ps_elp_sleep(wl);
2903
2904out:
2905 mutex_unlock(&wl->mutex);
2906
2907 return ret;
2908}
2909
2250/* can't be const, mac80211 writes to this */ 2910/* can't be const, mac80211 writes to this */
2251static struct ieee80211_rate wl1271_rates[] = { 2911static struct ieee80211_rate wl1271_rates[] = {
2252 { .bitrate = 10, 2912 { .bitrate = 10,
@@ -2305,6 +2965,7 @@ static struct ieee80211_channel wl1271_channels[] = {
2305 { .hw_value = 11, .center_freq = 2462, .max_power = 25 }, 2965 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
2306 { .hw_value = 12, .center_freq = 2467, .max_power = 25 }, 2966 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
2307 { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, 2967 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
2968 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
2308}; 2969};
2309 2970
2310/* mapping to indexes for wl1271_rates */ 2971/* mapping to indexes for wl1271_rates */
@@ -2493,6 +3154,9 @@ static const struct ieee80211_ops wl1271_ops = {
2493 .conf_tx = wl1271_op_conf_tx, 3154 .conf_tx = wl1271_op_conf_tx,
2494 .get_tsf = wl1271_op_get_tsf, 3155 .get_tsf = wl1271_op_get_tsf,
2495 .get_survey = wl1271_op_get_survey, 3156 .get_survey = wl1271_op_get_survey,
3157 .sta_add = wl1271_op_sta_add,
3158 .sta_remove = wl1271_op_sta_remove,
3159 .ampdu_action = wl1271_op_ampdu_action,
2496 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 3160 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
2497}; 3161};
2498 3162
@@ -2562,7 +3226,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
2562 if (wl->state == WL1271_STATE_OFF) 3226 if (wl->state == WL1271_STATE_OFF)
2563 goto out; 3227 goto out;
2564 3228
2565 ret = wl1271_ps_elp_wakeup(wl, false); 3229 ret = wl1271_ps_elp_wakeup(wl);
2566 if (ret < 0) 3230 if (ret < 0)
2567 goto out; 3231 goto out;
2568 3232
@@ -2607,6 +3271,18 @@ int wl1271_register_hw(struct wl1271 *wl)
2607 if (wl->mac80211_registered) 3271 if (wl->mac80211_registered)
2608 return 0; 3272 return 0;
2609 3273
3274 ret = wl1271_fetch_nvs(wl);
3275 if (ret == 0) {
3276 u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
3277
3278 wl->mac_addr[0] = nvs_ptr[11];
3279 wl->mac_addr[1] = nvs_ptr[10];
3280 wl->mac_addr[2] = nvs_ptr[6];
3281 wl->mac_addr[3] = nvs_ptr[5];
3282 wl->mac_addr[4] = nvs_ptr[4];
3283 wl->mac_addr[5] = nvs_ptr[3];
3284 }
3285
2610 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); 3286 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
2611 3287
2612 ret = ieee80211_register_hw(wl->hw); 3288 ret = ieee80211_register_hw(wl->hw);
@@ -2629,6 +3305,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
2629 3305
2630void wl1271_unregister_hw(struct wl1271 *wl) 3306void wl1271_unregister_hw(struct wl1271 *wl)
2631{ 3307{
3308 if (wl->state == WL1271_STATE_PLT)
3309 __wl1271_plt_stop(wl);
3310
2632 unregister_netdevice_notifier(&wl1271_dev_notifier); 3311 unregister_netdevice_notifier(&wl1271_dev_notifier);
2633 ieee80211_unregister_hw(wl->hw); 3312 ieee80211_unregister_hw(wl->hw);
2634 wl->mac80211_registered = false; 3313 wl->mac80211_registered = false;
@@ -2661,13 +3340,15 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2661 IEEE80211_HW_SUPPORTS_UAPSD | 3340 IEEE80211_HW_SUPPORTS_UAPSD |
2662 IEEE80211_HW_HAS_RATE_CONTROL | 3341 IEEE80211_HW_HAS_RATE_CONTROL |
2663 IEEE80211_HW_CONNECTION_MONITOR | 3342 IEEE80211_HW_CONNECTION_MONITOR |
2664 IEEE80211_HW_SUPPORTS_CQM_RSSI; 3343 IEEE80211_HW_SUPPORTS_CQM_RSSI |
3344 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
3345 IEEE80211_HW_AP_LINK_PS;
2665 3346
2666 wl->hw->wiphy->cipher_suites = cipher_suites; 3347 wl->hw->wiphy->cipher_suites = cipher_suites;
2667 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 3348 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
2668 3349
2669 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 3350 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2670 BIT(NL80211_IFTYPE_ADHOC); 3351 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
2671 wl->hw->wiphy->max_scan_ssids = 1; 3352 wl->hw->wiphy->max_scan_ssids = 1;
2672 /* 3353 /*
2673 * Maximum length of elements in scanning probe request templates 3354 * Maximum length of elements in scanning probe request templates
@@ -2676,8 +3357,20 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2676 */ 3357 */
2677 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 3358 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
2678 sizeof(struct ieee80211_header); 3359 sizeof(struct ieee80211_header);
2679 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 3360
2680 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz; 3361 /*
3362 * We keep local copies of the band structs because we need to
3363 * modify them on a per-device basis.
3364 */
3365 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
3366 sizeof(wl1271_band_2ghz));
3367 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
3368 sizeof(wl1271_band_5ghz));
3369
3370 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3371 &wl->bands[IEEE80211_BAND_2GHZ];
3372 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3373 &wl->bands[IEEE80211_BAND_5GHZ];
2681 3374
2682 wl->hw->queues = 4; 3375 wl->hw->queues = 4;
2683 wl->hw->max_rates = 1; 3376 wl->hw->max_rates = 1;
@@ -2686,6 +3379,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2686 3379
2687 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl)); 3380 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
2688 3381
3382 wl->hw->sta_data_size = sizeof(struct wl1271_station);
3383
3384 wl->hw->max_rx_aggregation_subframes = 8;
3385
2689 return 0; 3386 return 0;
2690} 3387}
2691EXPORT_SYMBOL_GPL(wl1271_init_ieee80211); 3388EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
@@ -2697,7 +3394,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2697 struct ieee80211_hw *hw; 3394 struct ieee80211_hw *hw;
2698 struct platform_device *plat_dev = NULL; 3395 struct platform_device *plat_dev = NULL;
2699 struct wl1271 *wl; 3396 struct wl1271 *wl;
2700 int i, ret; 3397 int i, j, ret;
2701 unsigned int order; 3398 unsigned int order;
2702 3399
2703 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 3400 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
@@ -2725,9 +3422,16 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2725 for (i = 0; i < NUM_TX_QUEUES; i++) 3422 for (i = 0; i < NUM_TX_QUEUES; i++)
2726 skb_queue_head_init(&wl->tx_queue[i]); 3423 skb_queue_head_init(&wl->tx_queue[i]);
2727 3424
3425 for (i = 0; i < NUM_TX_QUEUES; i++)
3426 for (j = 0; j < AP_MAX_LINKS; j++)
3427 skb_queue_head_init(&wl->links[j].tx_queue[i]);
3428
3429 skb_queue_head_init(&wl->deferred_rx_queue);
3430 skb_queue_head_init(&wl->deferred_tx_queue);
3431
2728 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 3432 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
2729 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); 3433 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
2730 INIT_WORK(&wl->irq_work, wl1271_irq_work); 3434 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
2731 INIT_WORK(&wl->tx_work, wl1271_tx_work); 3435 INIT_WORK(&wl->tx_work, wl1271_tx_work);
2732 INIT_WORK(&wl->recovery_work, wl1271_recovery_work); 3436 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
2733 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); 3437 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
@@ -2735,19 +3439,25 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2735 wl->beacon_int = WL1271_DEFAULT_BEACON_INT; 3439 wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
2736 wl->default_key = 0; 3440 wl->default_key = 0;
2737 wl->rx_counter = 0; 3441 wl->rx_counter = 0;
2738 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 3442 wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
2739 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 3443 wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
2740 wl->psm_entry_retry = 0; 3444 wl->psm_entry_retry = 0;
2741 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 3445 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2742 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 3446 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2743 wl->basic_rate = CONF_TX_RATE_MASK_BASIC; 3447 wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
2744 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 3448 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
2745 wl->sta_rate_set = 0;
2746 wl->band = IEEE80211_BAND_2GHZ; 3449 wl->band = IEEE80211_BAND_2GHZ;
2747 wl->vif = NULL; 3450 wl->vif = NULL;
2748 wl->flags = 0; 3451 wl->flags = 0;
2749 wl->sg_enabled = true; 3452 wl->sg_enabled = true;
2750 wl->hw_pg_ver = -1; 3453 wl->hw_pg_ver = -1;
3454 wl->bss_type = MAX_BSS_TYPE;
3455 wl->set_bss_type = MAX_BSS_TYPE;
3456 wl->fw_bss_type = MAX_BSS_TYPE;
3457 wl->last_tx_hlid = 0;
3458 wl->ap_ps_map = 0;
3459 wl->ap_fw_ps_map = 0;
3460 wl->quirks = 0;
2751 3461
2752 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 3462 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
2753 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 3463 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -2837,11 +3547,11 @@ int wl1271_free_hw(struct wl1271 *wl)
2837} 3547}
2838EXPORT_SYMBOL_GPL(wl1271_free_hw); 3548EXPORT_SYMBOL_GPL(wl1271_free_hw);
2839 3549
2840u32 wl12xx_debug_level; 3550u32 wl12xx_debug_level = DEBUG_NONE;
2841EXPORT_SYMBOL_GPL(wl12xx_debug_level); 3551EXPORT_SYMBOL_GPL(wl12xx_debug_level);
2842module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE); 3552module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
2843MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); 3553MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
2844 3554
2845MODULE_LICENSE("GPL"); 3555MODULE_LICENSE("GPL");
2846MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 3556MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
2847MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 3557MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 60a3738eadb0..971f13e792da 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -24,6 +24,7 @@
24#include "reg.h" 24#include "reg.h"
25#include "ps.h" 25#include "ps.h"
26#include "io.h" 26#include "io.h"
27#include "tx.h"
27 28
28#define WL1271_WAKEUP_TIMEOUT 500 29#define WL1271_WAKEUP_TIMEOUT 500
29 30
@@ -68,7 +69,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
68 } 69 }
69} 70}
70 71
71int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake) 72int wl1271_ps_elp_wakeup(struct wl1271 *wl)
72{ 73{
73 DECLARE_COMPLETION_ONSTACK(compl); 74 DECLARE_COMPLETION_ONSTACK(compl);
74 unsigned long flags; 75 unsigned long flags;
@@ -86,7 +87,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
86 * the completion variable in one entity. 87 * the completion variable in one entity.
87 */ 88 */
88 spin_lock_irqsave(&wl->wl_lock, flags); 89 spin_lock_irqsave(&wl->wl_lock, flags);
89 if (work_pending(&wl->irq_work) || chip_awake) 90 if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
90 pending = true; 91 pending = true;
91 else 92 else
92 wl->elp_compl = &compl; 93 wl->elp_compl = &compl;
@@ -139,8 +140,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
139 return ret; 140 return ret;
140 } 141 }
141 142
142 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE, 143 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
143 rates, send);
144 if (ret < 0) 144 if (ret < 0)
145 return ret; 145 return ret;
146 146
@@ -149,7 +149,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
149 case STATION_ACTIVE_MODE: 149 case STATION_ACTIVE_MODE:
150 default: 150 default:
151 wl1271_debug(DEBUG_PSM, "leaving psm"); 151 wl1271_debug(DEBUG_PSM, "leaving psm");
152 ret = wl1271_ps_elp_wakeup(wl, false); 152 ret = wl1271_ps_elp_wakeup(wl);
153 if (ret < 0) 153 if (ret < 0)
154 return ret; 154 return ret;
155 155
@@ -163,8 +163,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
163 if (ret < 0) 163 if (ret < 0)
164 return ret; 164 return ret;
165 165
166 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE, 166 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
167 rates, send);
168 if (ret < 0) 167 if (ret < 0)
169 return ret; 168 return ret;
170 169
@@ -175,4 +174,81 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
175 return ret; 174 return ret;
176} 175}
177 176
177static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
178{
179 int i, filtered = 0;
180 struct sk_buff *skb;
181 struct ieee80211_tx_info *info;
182 unsigned long flags;
183
184 /* filter all frames currently the low level queus for this hlid */
185 for (i = 0; i < NUM_TX_QUEUES; i++) {
186 while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
187 info = IEEE80211_SKB_CB(skb);
188 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
189 info->status.rates[0].idx = -1;
190 ieee80211_tx_status(wl->hw, skb);
191 filtered++;
192 }
193 }
194
195 spin_lock_irqsave(&wl->wl_lock, flags);
196 wl->tx_queue_count -= filtered;
197 spin_unlock_irqrestore(&wl->wl_lock, flags);
198
199 wl1271_handle_tx_low_watermark(wl);
200}
201
202void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
203{
204 struct ieee80211_sta *sta;
205
206 if (test_bit(hlid, &wl->ap_ps_map))
207 return;
208
209 wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
210 "clean_queues %d", hlid, wl->links[hlid].allocated_blks,
211 clean_queues);
212
213 rcu_read_lock();
214 sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
215 if (!sta) {
216 wl1271_error("could not find sta %pM for starting ps",
217 wl->links[hlid].addr);
218 rcu_read_unlock();
219 return;
220 }
178 221
222 ieee80211_sta_ps_transition_ni(sta, true);
223 rcu_read_unlock();
224
225 /* do we want to filter all frames from this link's queues? */
226 if (clean_queues)
227 wl1271_ps_filter_frames(wl, hlid);
228
229 __set_bit(hlid, &wl->ap_ps_map);
230}
231
232void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
233{
234 struct ieee80211_sta *sta;
235
236 if (!test_bit(hlid, &wl->ap_ps_map))
237 return;
238
239 wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);
240
241 __clear_bit(hlid, &wl->ap_ps_map);
242
243 rcu_read_lock();
244 sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
245 if (!sta) {
246 wl1271_error("could not find sta %pM for ending ps",
247 wl->links[hlid].addr);
248 goto end;
249 }
250
251 ieee80211_sta_ps_transition_ni(sta, false);
252end:
253 rcu_read_unlock();
254}
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 8415060f08e5..c41bd0a711bc 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -30,7 +30,9 @@
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, 30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
31 u32 rates, bool send); 31 u32 rates, bool send);
32void wl1271_ps_elp_sleep(struct wl1271 *wl); 32void wl1271_ps_elp_sleep(struct wl1271 *wl);
33int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); 33int wl1271_ps_elp_wakeup(struct wl1271 *wl);
34void wl1271_elp_work(struct work_struct *work); 34void wl1271_elp_work(struct work_struct *work);
35void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
36void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
35 37
36#endif /* __WL1271_PS_H__ */ 38#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 682304c30b81..919b59f00301 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -29,14 +29,14 @@
29#include "rx.h" 29#include "rx.h"
30#include "io.h" 30#include "io.h"
31 31
32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
33 u32 drv_rx_counter) 33 u32 drv_rx_counter)
34{ 34{
35 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 35 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
36 RX_MEM_BLOCK_MASK; 36 RX_MEM_BLOCK_MASK;
37} 37}
38 38
39static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status, 39static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
40 u32 drv_rx_counter) 40 u32 drv_rx_counter)
41{ 41{
42 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 42 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
@@ -76,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
76 */ 76 */
77 wl->noise = desc->rssi - (desc->snr >> 1); 77 wl->noise = desc->rssi - (desc->snr >> 1);
78 78
79 status->freq = ieee80211_channel_to_frequency(desc->channel); 79 status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band);
80 80
81 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { 81 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
82 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 82 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -92,7 +92,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
92{ 92{
93 struct wl1271_rx_descriptor *desc; 93 struct wl1271_rx_descriptor *desc;
94 struct sk_buff *skb; 94 struct sk_buff *skb;
95 u16 *fc; 95 struct ieee80211_hdr *hdr;
96 u8 *buf; 96 u8 *buf;
97 u8 beacon = 0; 97 u8 beacon = 0;
98 98
@@ -118,8 +118,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
118 /* now we pull the descriptor out of the buffer */ 118 /* now we pull the descriptor out of the buffer */
119 skb_pull(skb, sizeof(*desc)); 119 skb_pull(skb, sizeof(*desc));
120 120
121 fc = (u16 *)skb->data; 121 hdr = (struct ieee80211_hdr *)skb->data;
122 if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) 122 if (ieee80211_is_beacon(hdr->frame_control))
123 beacon = 1; 123 beacon = 1;
124 124
125 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); 125 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
@@ -129,12 +129,13 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
129 129
130 skb_trim(skb, skb->len - desc->pad_len); 130 skb_trim(skb, skb->len - desc->pad_len);
131 131
132 ieee80211_rx_ni(wl->hw, skb); 132 skb_queue_tail(&wl->deferred_rx_queue, skb);
133 ieee80211_queue_work(wl->hw, &wl->netstack_work);
133 134
134 return 0; 135 return 0;
135} 136}
136 137
137void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) 138void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
138{ 139{
139 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; 140 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
140 u32 buf_size; 141 u32 buf_size;
@@ -198,6 +199,22 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
198 pkt_offset += pkt_length; 199 pkt_offset += pkt_length;
199 } 200 }
200 } 201 }
201 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, 202
202 cpu_to_le32(wl->rx_counter)); 203 /*
204 * Write the driver's packet counter to the FW. This is only required
205 * for older hardware revisions
206 */
207 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
208 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
209}
210
211void wl1271_set_default_filters(struct wl1271 *wl)
212{
213 if (wl->bss_type == BSS_TYPE_AP_BSS) {
214 wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
215 wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
216 } else {
217 wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
218 wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
219 }
203} 220}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index 3abb26fe0364..75fabf836491 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -30,10 +30,6 @@
30#define WL1271_RX_MAX_RSSI -30 30#define WL1271_RX_MAX_RSSI -30
31#define WL1271_RX_MIN_RSSI -95 31#define WL1271_RX_MIN_RSSI -95
32 32
33#define WL1271_RX_ALIGN_TO 4
34#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \
35 ~(WL1271_RX_ALIGN_TO - 1))
36
37#define SHORT_PREAMBLE_BIT BIT(0) 33#define SHORT_PREAMBLE_BIT BIT(0)
38#define OFDM_RATE_BIT BIT(6) 34#define OFDM_RATE_BIT BIT(6)
39#define PBCC_RATE_BIT BIT(7) 35#define PBCC_RATE_BIT BIT(7)
@@ -86,8 +82,9 @@
86/* 82/*
87 * RX Descriptor status 83 * RX Descriptor status
88 * 84 *
89 * Bits 0-2 - status 85 * Bits 0-2 - error code
90 * Bits 3-7 - reserved 86 * Bits 3-5 - process_id tag (AP mode FW)
87 * Bits 6-7 - reserved
91 */ 88 */
92#define WL1271_RX_DESC_STATUS_MASK 0x07 89#define WL1271_RX_DESC_STATUS_MASK 0x07
93 90
@@ -110,12 +107,16 @@ struct wl1271_rx_descriptor {
110 u8 snr; 107 u8 snr;
111 __le32 timestamp; 108 __le32 timestamp;
112 u8 packet_class; 109 u8 packet_class;
113 u8 process_id; 110 union {
111 u8 process_id; /* STA FW */
112 u8 hlid; /* AP FW */
113 } __packed;
114 u8 pad_len; 114 u8 pad_len;
115 u8 reserved; 115 u8 reserved;
116} __packed; 116} __packed;
117 117
118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); 118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
119u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 119u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
120void wl1271_set_default_filters(struct wl1271 *wl);
120 121
121#endif 122#endif
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index 6f897b9d90ca..420653a2859c 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -27,6 +27,7 @@
27#include "cmd.h" 27#include "cmd.h"
28#include "scan.h" 28#include "scan.h"
29#include "acx.h" 29#include "acx.h"
30#include "ps.h"
30 31
31void wl1271_scan_complete_work(struct work_struct *work) 32void wl1271_scan_complete_work(struct work_struct *work)
32{ 33{
@@ -40,10 +41,11 @@ void wl1271_scan_complete_work(struct work_struct *work)
40 41
41 mutex_lock(&wl->mutex); 42 mutex_lock(&wl->mutex);
42 43
43 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) { 44 if (wl->state == WL1271_STATE_OFF)
44 mutex_unlock(&wl->mutex); 45 goto out;
45 return; 46
46 } 47 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
48 goto out;
47 49
48 wl->scan.state = WL1271_SCAN_STATE_IDLE; 50 wl->scan.state = WL1271_SCAN_STATE_IDLE;
49 kfree(wl->scan.scanned_ch); 51 kfree(wl->scan.scanned_ch);
@@ -52,13 +54,19 @@ void wl1271_scan_complete_work(struct work_struct *work)
52 ieee80211_scan_completed(wl->hw, false); 54 ieee80211_scan_completed(wl->hw, false);
53 55
54 /* restore hardware connection monitoring template */ 56 /* restore hardware connection monitoring template */
55 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 57 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
56 wl1271_cmd_build_ap_probe_req(wl, wl->probereq); 58 if (wl1271_ps_elp_wakeup(wl) == 0) {
59 wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
60 wl1271_ps_elp_sleep(wl);
61 }
62 }
57 63
58 if (wl->scan.failed) { 64 if (wl->scan.failed) {
59 wl1271_info("Scan completed due to error."); 65 wl1271_info("Scan completed due to error.");
60 ieee80211_queue_work(wl->hw, &wl->recovery_work); 66 ieee80211_queue_work(wl->hw, &wl->recovery_work);
61 } 67 }
68
69out:
62 mutex_unlock(&wl->mutex); 70 mutex_unlock(&wl->mutex);
63 71
64} 72}
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 93cbb8d5aba9..5b9dbeafec06 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -28,6 +28,7 @@
28#include <linux/mmc/sdio_func.h> 28#include <linux/mmc/sdio_func.h>
29#include <linux/mmc/sdio_ids.h> 29#include <linux/mmc/sdio_ids.h>
30#include <linux/mmc/card.h> 30#include <linux/mmc/card.h>
31#include <linux/mmc/host.h>
31#include <linux/gpio.h> 32#include <linux/gpio.h>
32#include <linux/wl12xx.h> 33#include <linux/wl12xx.h>
33#include <linux/pm_runtime.h> 34#include <linux/pm_runtime.h>
@@ -60,7 +61,7 @@ static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
60 return &(wl_to_func(wl)->dev); 61 return &(wl_to_func(wl)->dev);
61} 62}
62 63
63static irqreturn_t wl1271_irq(int irq, void *cookie) 64static irqreturn_t wl1271_hardirq(int irq, void *cookie)
64{ 65{
65 struct wl1271 *wl = cookie; 66 struct wl1271 *wl = cookie;
66 unsigned long flags; 67 unsigned long flags;
@@ -69,17 +70,14 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
69 70
70 /* complete the ELP completion */ 71 /* complete the ELP completion */
71 spin_lock_irqsave(&wl->wl_lock, flags); 72 spin_lock_irqsave(&wl->wl_lock, flags);
73 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
72 if (wl->elp_compl) { 74 if (wl->elp_compl) {
73 complete(wl->elp_compl); 75 complete(wl->elp_compl);
74 wl->elp_compl = NULL; 76 wl->elp_compl = NULL;
75 } 77 }
76
77 if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
78 ieee80211_queue_work(wl->hw, &wl->irq_work);
79 set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
80 spin_unlock_irqrestore(&wl->wl_lock, flags); 78 spin_unlock_irqrestore(&wl->wl_lock, flags);
81 79
82 return IRQ_HANDLED; 80 return IRQ_WAKE_THREAD;
83} 81}
84 82
85static void wl1271_sdio_disable_interrupts(struct wl1271 *wl) 83static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
@@ -106,8 +104,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
106 int ret; 104 int ret;
107 struct sdio_func *func = wl_to_func(wl); 105 struct sdio_func *func = wl_to_func(wl);
108 106
109 sdio_claim_host(func);
110
111 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 107 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
112 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 108 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
113 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", 109 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
@@ -123,8 +119,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
123 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); 119 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
124 } 120 }
125 121
126 sdio_release_host(func);
127
128 if (ret) 122 if (ret)
129 wl1271_error("sdio read failed (%d)", ret); 123 wl1271_error("sdio read failed (%d)", ret);
130} 124}
@@ -135,8 +129,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
135 int ret; 129 int ret;
136 struct sdio_func *func = wl_to_func(wl); 130 struct sdio_func *func = wl_to_func(wl);
137 131
138 sdio_claim_host(func);
139
140 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 132 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
141 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); 133 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
142 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", 134 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
@@ -152,8 +144,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
152 ret = sdio_memcpy_toio(func, addr, buf, len); 144 ret = sdio_memcpy_toio(func, addr, buf, len);
153 } 145 }
154 146
155 sdio_release_host(func);
156
157 if (ret) 147 if (ret)
158 wl1271_error("sdio write failed (%d)", ret); 148 wl1271_error("sdio write failed (%d)", ret);
159} 149}
@@ -163,14 +153,18 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
163 struct sdio_func *func = wl_to_func(wl); 153 struct sdio_func *func = wl_to_func(wl);
164 int ret; 154 int ret;
165 155
166 /* Power up the card */ 156 /* Make sure the card will not be powered off by runtime PM */
167 ret = pm_runtime_get_sync(&func->dev); 157 ret = pm_runtime_get_sync(&func->dev);
168 if (ret < 0) 158 if (ret < 0)
169 goto out; 159 goto out;
170 160
161 /* Runtime PM might be disabled, so power up the card manually */
162 ret = mmc_power_restore_host(func->card->host);
163 if (ret < 0)
164 goto out;
165
171 sdio_claim_host(func); 166 sdio_claim_host(func);
172 sdio_enable_func(func); 167 sdio_enable_func(func);
173 sdio_release_host(func);
174 168
175out: 169out:
176 return ret; 170 return ret;
@@ -179,12 +173,17 @@ out:
179static int wl1271_sdio_power_off(struct wl1271 *wl) 173static int wl1271_sdio_power_off(struct wl1271 *wl)
180{ 174{
181 struct sdio_func *func = wl_to_func(wl); 175 struct sdio_func *func = wl_to_func(wl);
176 int ret;
182 177
183 sdio_claim_host(func);
184 sdio_disable_func(func); 178 sdio_disable_func(func);
185 sdio_release_host(func); 179 sdio_release_host(func);
186 180
187 /* Power down the card */ 181 /* Runtime PM might be disabled, so power off the card manually */
182 ret = mmc_power_save_host(func->card->host);
183 if (ret < 0)
184 return ret;
185
186 /* Let runtime PM know the card is powered off */
188 return pm_runtime_put_sync(&func->dev); 187 return pm_runtime_put_sync(&func->dev);
189} 188}
190 189
@@ -241,14 +240,14 @@ static int __devinit wl1271_probe(struct sdio_func *func,
241 wl->irq = wlan_data->irq; 240 wl->irq = wlan_data->irq;
242 wl->ref_clock = wlan_data->board_ref_clock; 241 wl->ref_clock = wlan_data->board_ref_clock;
243 242
244 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); 243 ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
244 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
245 DRIVER_NAME, wl);
245 if (ret < 0) { 246 if (ret < 0) {
246 wl1271_error("request_irq() failed: %d", ret); 247 wl1271_error("request_irq() failed: %d", ret);
247 goto out_free; 248 goto out_free;
248 } 249 }
249 250
250 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
251
252 disable_irq(wl->irq); 251 disable_irq(wl->irq);
253 252
254 ret = wl1271_init_ieee80211(wl); 253 ret = wl1271_init_ieee80211(wl);
@@ -271,7 +270,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
271 out_irq: 270 out_irq:
272 free_irq(wl->irq, wl); 271 free_irq(wl->irq, wl);
273 272
274
275 out_free: 273 out_free:
276 wl1271_free_hw(wl); 274 wl1271_free_hw(wl);
277 275
@@ -345,3 +343,4 @@ MODULE_LICENSE("GPL");
345MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 343MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
346MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 344MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
347MODULE_FIRMWARE(WL1271_FW_NAME); 345MODULE_FIRMWARE(WL1271_FW_NAME);
346MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 7145ea543783..18cf01719ae0 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,6 +110,7 @@ static void wl1271_spi_reset(struct wl1271 *wl)
110 spi_message_add_tail(&t, &m); 110 spi_message_add_tail(&t, &m);
111 111
112 spi_sync(wl_to_spi(wl), &m); 112 spi_sync(wl_to_spi(wl), &m);
113
113 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); 114 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
114 kfree(cmd); 115 kfree(cmd);
115} 116}
@@ -319,28 +320,23 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
319 spi_sync(wl_to_spi(wl), &m); 320 spi_sync(wl_to_spi(wl), &m);
320} 321}
321 322
322static irqreturn_t wl1271_irq(int irq, void *cookie) 323static irqreturn_t wl1271_hardirq(int irq, void *cookie)
323{ 324{
324 struct wl1271 *wl; 325 struct wl1271 *wl = cookie;
325 unsigned long flags; 326 unsigned long flags;
326 327
327 wl1271_debug(DEBUG_IRQ, "IRQ"); 328 wl1271_debug(DEBUG_IRQ, "IRQ");
328 329
329 wl = cookie;
330
331 /* complete the ELP completion */ 330 /* complete the ELP completion */
332 spin_lock_irqsave(&wl->wl_lock, flags); 331 spin_lock_irqsave(&wl->wl_lock, flags);
332 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
333 if (wl->elp_compl) { 333 if (wl->elp_compl) {
334 complete(wl->elp_compl); 334 complete(wl->elp_compl);
335 wl->elp_compl = NULL; 335 wl->elp_compl = NULL;
336 } 336 }
337
338 if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
339 ieee80211_queue_work(wl->hw, &wl->irq_work);
340 set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
341 spin_unlock_irqrestore(&wl->wl_lock, flags); 337 spin_unlock_irqrestore(&wl->wl_lock, flags);
342 338
343 return IRQ_HANDLED; 339 return IRQ_WAKE_THREAD;
344} 340}
345 341
346static int wl1271_spi_set_power(struct wl1271 *wl, bool enable) 342static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
@@ -412,14 +408,14 @@ static int __devinit wl1271_probe(struct spi_device *spi)
412 goto out_free; 408 goto out_free;
413 } 409 }
414 410
415 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); 411 ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
412 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
413 DRIVER_NAME, wl);
416 if (ret < 0) { 414 if (ret < 0) {
417 wl1271_error("request_irq() failed: %d", ret); 415 wl1271_error("request_irq() failed: %d", ret);
418 goto out_free; 416 goto out_free;
419 } 417 }
420 418
421 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
422
423 disable_irq(wl->irq); 419 disable_irq(wl->irq);
424 420
425 ret = wl1271_init_ieee80211(wl); 421 ret = wl1271_init_ieee80211(wl);
@@ -494,4 +490,5 @@ MODULE_LICENSE("GPL");
494MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 490MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
495MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 491MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
496MODULE_FIRMWARE(WL1271_FW_NAME); 492MODULE_FIRMWARE(WL1271_FW_NAME);
493MODULE_FIRMWARE(WL1271_AP_FW_NAME);
497MODULE_ALIAS("spi:wl1271"); 494MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index b44c75cd8c1e..5e9ef7d53e7e 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/etherdevice.h>
26 27
27#include "wl12xx.h" 28#include "wl12xx.h"
28#include "io.h" 29#include "io.h"
@@ -30,6 +31,23 @@
30#include "ps.h" 31#include "ps.h"
31#include "tx.h" 32#include "tx.h"
32 33
34static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
35{
36 int ret;
37 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
38
39 if (is_ap)
40 ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
41 else
42 ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
43
44 if (ret < 0)
45 return ret;
46
47 wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
48 return 0;
49}
50
33static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) 51static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
34{ 52{
35 int id; 53 int id;
@@ -52,8 +70,65 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
52 } 70 }
53} 71}
54 72
73static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
74 struct sk_buff *skb)
75{
76 struct ieee80211_hdr *hdr;
77
78 /*
79 * add the station to the known list before transmitting the
80 * authentication response. this way it won't get de-authed by FW
81 * when transmitting too soon.
82 */
83 hdr = (struct ieee80211_hdr *)(skb->data +
84 sizeof(struct wl1271_tx_hw_descr));
85 if (ieee80211_is_auth(hdr->frame_control))
86 wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
87}
88
89static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
90{
91 bool fw_ps;
92 u8 tx_blks;
93
94 /* only regulate station links */
95 if (hlid < WL1271_AP_STA_HLID_START)
96 return;
97
98 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
99 tx_blks = wl->links[hlid].allocated_blks;
100
101 /*
102 * if in FW PS and there is enough data in FW we can put the link
103 * into high-level PS and clean out its TX queues.
104 */
105 if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
106 wl1271_ps_link_start(wl, hlid, true);
107}
108
109u8 wl1271_tx_get_hlid(struct sk_buff *skb)
110{
111 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
112
113 if (control->control.sta) {
114 struct wl1271_station *wl_sta;
115
116 wl_sta = (struct wl1271_station *)
117 control->control.sta->drv_priv;
118 return wl_sta->hlid;
119 } else {
120 struct ieee80211_hdr *hdr;
121
122 hdr = (struct ieee80211_hdr *)skb->data;
123 if (ieee80211_is_mgmt(hdr->frame_control))
124 return WL1271_AP_GLOBAL_HLID;
125 else
126 return WL1271_AP_BROADCAST_HLID;
127 }
128}
129
55static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, 130static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
56 u32 buf_offset) 131 u32 buf_offset, u8 hlid)
57{ 132{
58 struct wl1271_tx_hw_descr *desc; 133 struct wl1271_tx_hw_descr *desc;
59 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 134 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -82,6 +157,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
82 157
83 wl->tx_blocks_available -= total_blocks; 158 wl->tx_blocks_available -= total_blocks;
84 159
160 if (wl->bss_type == BSS_TYPE_AP_BSS)
161 wl->links[hlid].allocated_blks += total_blocks;
162
85 ret = 0; 163 ret = 0;
86 164
87 wl1271_debug(DEBUG_TX, 165 wl1271_debug(DEBUG_TX,
@@ -95,11 +173,12 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
95} 173}
96 174
97static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 175static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
98 u32 extra, struct ieee80211_tx_info *control) 176 u32 extra, struct ieee80211_tx_info *control,
177 u8 hlid)
99{ 178{
100 struct timespec ts; 179 struct timespec ts;
101 struct wl1271_tx_hw_descr *desc; 180 struct wl1271_tx_hw_descr *desc;
102 int pad, ac; 181 int pad, ac, rate_idx;
103 s64 hosttime; 182 s64 hosttime;
104 u16 tx_attr; 183 u16 tx_attr;
105 184
@@ -117,7 +196,11 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
117 getnstimeofday(&ts); 196 getnstimeofday(&ts);
118 hosttime = (timespec_to_ns(&ts) >> 10); 197 hosttime = (timespec_to_ns(&ts) >> 10);
119 desc->start_time = cpu_to_le32(hosttime - wl->time_offset); 198 desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
120 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); 199
200 if (wl->bss_type != BSS_TYPE_AP_BSS)
201 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
202 else
203 desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
121 204
122 /* configure the tx attributes */ 205 /* configure the tx attributes */
123 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 206 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
@@ -125,25 +208,49 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
125 /* queue (we use same identifiers for tid's and ac's */ 208 /* queue (we use same identifiers for tid's and ac's */
126 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 209 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
127 desc->tid = ac; 210 desc->tid = ac;
128 desc->aid = TX_HW_DEFAULT_AID; 211
212 if (wl->bss_type != BSS_TYPE_AP_BSS) {
213 desc->aid = hlid;
214
215 /* if the packets are destined for AP (have a STA entry)
216 send them with AP rate policies, otherwise use default
217 basic rates */
218 if (control->control.sta)
219 rate_idx = ACX_TX_AP_FULL_RATE;
220 else
221 rate_idx = ACX_TX_BASIC_RATE;
222 } else {
223 desc->hlid = hlid;
224 switch (hlid) {
225 case WL1271_AP_GLOBAL_HLID:
226 rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
227 break;
228 case WL1271_AP_BROADCAST_HLID:
229 rate_idx = ACX_TX_AP_MODE_BCST_RATE;
230 break;
231 default:
232 rate_idx = ac;
233 break;
234 }
235 }
236
237 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
129 desc->reserved = 0; 238 desc->reserved = 0;
130 239
131 /* align the length (and store in terms of words) */ 240 /* align the length (and store in terms of words) */
132 pad = WL1271_TX_ALIGN(skb->len); 241 pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
133 desc->length = cpu_to_le16(pad >> 2); 242 desc->length = cpu_to_le16(pad >> 2);
134 243
135 /* calculate number of padding bytes */ 244 /* calculate number of padding bytes */
136 pad = pad - skb->len; 245 pad = pad - skb->len;
137 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 246 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
138 247
139 /* if the packets are destined for AP (have a STA entry) send them
140 with AP rate policies, otherwise use default basic rates */
141 if (control->control.sta)
142 tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
143
144 desc->tx_attr = cpu_to_le16(tx_attr); 248 desc->tx_attr = cpu_to_le16(tx_attr);
145 249
146 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 250 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
251 "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid,
252 le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length),
253 le16_to_cpu(desc->life_time), desc->total_mem_blocks);
147} 254}
148 255
149/* caller must hold wl->mutex */ 256/* caller must hold wl->mutex */
@@ -153,8 +260,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
153 struct ieee80211_tx_info *info; 260 struct ieee80211_tx_info *info;
154 u32 extra = 0; 261 u32 extra = 0;
155 int ret = 0; 262 int ret = 0;
156 u8 idx;
157 u32 total_len; 263 u32 total_len;
264 u8 hlid;
158 265
159 if (!skb) 266 if (!skb)
160 return -EINVAL; 267 return -EINVAL;
@@ -166,29 +273,43 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
166 extra = WL1271_TKIP_IV_SPACE; 273 extra = WL1271_TKIP_IV_SPACE;
167 274
168 if (info->control.hw_key) { 275 if (info->control.hw_key) {
169 idx = info->control.hw_key->hw_key_idx; 276 bool is_wep;
277 u8 idx = info->control.hw_key->hw_key_idx;
278 u32 cipher = info->control.hw_key->cipher;
170 279
171 /* FIXME: do we have to do this if we're not using WEP? */ 280 is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
172 if (unlikely(wl->default_key != idx)) { 281 (cipher == WLAN_CIPHER_SUITE_WEP104);
173 ret = wl1271_cmd_set_default_wep_key(wl, idx); 282
283 if (unlikely(is_wep && wl->default_key != idx)) {
284 ret = wl1271_set_default_wep_key(wl, idx);
174 if (ret < 0) 285 if (ret < 0)
175 return ret; 286 return ret;
176 wl->default_key = idx; 287 wl->default_key = idx;
177 } 288 }
178 } 289 }
179 290
180 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset); 291 if (wl->bss_type == BSS_TYPE_AP_BSS)
292 hlid = wl1271_tx_get_hlid(skb);
293 else
294 hlid = TX_HW_DEFAULT_AID;
295
296 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
181 if (ret < 0) 297 if (ret < 0)
182 return ret; 298 return ret;
183 299
184 wl1271_tx_fill_hdr(wl, skb, extra, info); 300 if (wl->bss_type == BSS_TYPE_AP_BSS) {
301 wl1271_tx_ap_update_inconnection_sta(wl, skb);
302 wl1271_tx_regulate_link(wl, hlid);
303 }
304
305 wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
185 306
186 /* 307 /*
187 * The length of each packet is stored in terms of words. Thus, we must 308 * The length of each packet is stored in terms of words. Thus, we must
188 * pad the skb data to make sure its length is aligned. 309 * pad the skb data to make sure its length is aligned.
189 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr 310 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
190 */ 311 */
191 total_len = WL1271_TX_ALIGN(skb->len); 312 total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
192 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); 313 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
193 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); 314 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
194 315
@@ -222,7 +343,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
222 return enabled_rates; 343 return enabled_rates;
223} 344}
224 345
225static void handle_tx_low_watermark(struct wl1271 *wl) 346void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
226{ 347{
227 unsigned long flags; 348 unsigned long flags;
228 349
@@ -236,7 +357,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
236 } 357 }
237} 358}
238 359
239static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) 360static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
240{ 361{
241 struct sk_buff *skb = NULL; 362 struct sk_buff *skb = NULL;
242 unsigned long flags; 363 unsigned long flags;
@@ -262,12 +383,69 @@ out:
262 return skb; 383 return skb;
263} 384}
264 385
386static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
387{
388 struct sk_buff *skb = NULL;
389 unsigned long flags;
390 int i, h, start_hlid;
391
392 /* start from the link after the last one */
393 start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
394
395 /* dequeue according to AC, round robin on each link */
396 for (i = 0; i < AP_MAX_LINKS; i++) {
397 h = (start_hlid + i) % AP_MAX_LINKS;
398
399 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
400 if (skb)
401 goto out;
402 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
403 if (skb)
404 goto out;
405 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
406 if (skb)
407 goto out;
408 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
409 if (skb)
410 goto out;
411 }
412
413out:
414 if (skb) {
415 wl->last_tx_hlid = h;
416 spin_lock_irqsave(&wl->wl_lock, flags);
417 wl->tx_queue_count--;
418 spin_unlock_irqrestore(&wl->wl_lock, flags);
419 } else {
420 wl->last_tx_hlid = 0;
421 }
422
423 return skb;
424}
425
426static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
427{
428 if (wl->bss_type == BSS_TYPE_AP_BSS)
429 return wl1271_ap_skb_dequeue(wl);
430
431 return wl1271_sta_skb_dequeue(wl);
432}
433
265static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) 434static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
266{ 435{
267 unsigned long flags; 436 unsigned long flags;
268 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 437 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
269 438
270 skb_queue_head(&wl->tx_queue[q], skb); 439 if (wl->bss_type == BSS_TYPE_AP_BSS) {
440 u8 hlid = wl1271_tx_get_hlid(skb);
441 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
442
443 /* make sure we dequeue the same packet next time */
444 wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
445 } else {
446 skb_queue_head(&wl->tx_queue[q], skb);
447 }
448
271 spin_lock_irqsave(&wl->wl_lock, flags); 449 spin_lock_irqsave(&wl->wl_lock, flags);
272 wl->tx_queue_count++; 450 wl->tx_queue_count++;
273 spin_unlock_irqrestore(&wl->wl_lock, flags); 451 spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -277,38 +455,16 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
277{ 455{
278 struct sk_buff *skb; 456 struct sk_buff *skb;
279 bool woken_up = false; 457 bool woken_up = false;
280 u32 sta_rates = 0;
281 u32 buf_offset = 0; 458 u32 buf_offset = 0;
282 bool sent_packets = false; 459 bool sent_packets = false;
283 int ret; 460 int ret;
284 461
285 /* check if the rates supported by the AP have changed */
286 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
287 &wl->flags))) {
288 unsigned long flags;
289
290 spin_lock_irqsave(&wl->wl_lock, flags);
291 sta_rates = wl->sta_rate_set;
292 spin_unlock_irqrestore(&wl->wl_lock, flags);
293 }
294
295 if (unlikely(wl->state == WL1271_STATE_OFF)) 462 if (unlikely(wl->state == WL1271_STATE_OFF))
296 goto out; 463 goto out;
297 464
298 /* if rates have changed, re-configure the rate policy */
299 if (unlikely(sta_rates)) {
300 ret = wl1271_ps_elp_wakeup(wl, false);
301 if (ret < 0)
302 goto out;
303 woken_up = true;
304
305 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
306 wl1271_acx_rate_policies(wl);
307 }
308
309 while ((skb = wl1271_skb_dequeue(wl))) { 465 while ((skb = wl1271_skb_dequeue(wl))) {
310 if (!woken_up) { 466 if (!woken_up) {
311 ret = wl1271_ps_elp_wakeup(wl, false); 467 ret = wl1271_ps_elp_wakeup(wl);
312 if (ret < 0) 468 if (ret < 0)
313 goto out_ack; 469 goto out_ack;
314 woken_up = true; 470 woken_up = true;
@@ -350,9 +506,15 @@ out_ack:
350 sent_packets = true; 506 sent_packets = true;
351 } 507 }
352 if (sent_packets) { 508 if (sent_packets) {
353 /* interrupt the firmware with the new packets */ 509 /*
354 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 510 * Interrupt the firmware with the new packets. This is only
355 handle_tx_low_watermark(wl); 511 * required for older hardware revisions
512 */
513 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
514 wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
515 wl->tx_packets_count);
516
517 wl1271_handle_tx_low_watermark(wl);
356 } 518 }
357 519
358out: 520out:
@@ -427,7 +589,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
427 result->rate_class_index, result->status); 589 result->rate_class_index, result->status);
428 590
429 /* return the packet to the stack */ 591 /* return the packet to the stack */
430 ieee80211_tx_status(wl->hw, skb); 592 skb_queue_tail(&wl->deferred_tx_queue, skb);
593 ieee80211_queue_work(wl->hw, &wl->netstack_work);
431 wl1271_free_tx_id(wl, result->id); 594 wl1271_free_tx_id(wl, result->id);
432} 595}
433 596
@@ -469,34 +632,92 @@ void wl1271_tx_complete(struct wl1271 *wl)
469 } 632 }
470} 633}
471 634
635void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
636{
637 struct sk_buff *skb;
638 int i, total = 0;
639 unsigned long flags;
640 struct ieee80211_tx_info *info;
641
642 for (i = 0; i < NUM_TX_QUEUES; i++) {
643 while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
644 wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
645 info = IEEE80211_SKB_CB(skb);
646 info->status.rates[0].idx = -1;
647 info->status.rates[0].count = 0;
648 ieee80211_tx_status(wl->hw, skb);
649 total++;
650 }
651 }
652
653 spin_lock_irqsave(&wl->wl_lock, flags);
654 wl->tx_queue_count -= total;
655 spin_unlock_irqrestore(&wl->wl_lock, flags);
656
657 wl1271_handle_tx_low_watermark(wl);
658}
659
472/* caller must hold wl->mutex */ 660/* caller must hold wl->mutex */
473void wl1271_tx_reset(struct wl1271 *wl) 661void wl1271_tx_reset(struct wl1271 *wl)
474{ 662{
475 int i; 663 int i;
476 struct sk_buff *skb; 664 struct sk_buff *skb;
665 struct ieee80211_tx_info *info;
477 666
478 /* TX failure */ 667 /* TX failure */
479 for (i = 0; i < NUM_TX_QUEUES; i++) { 668 if (wl->bss_type == BSS_TYPE_AP_BSS) {
480 while ((skb = skb_dequeue(&wl->tx_queue[i]))) { 669 for (i = 0; i < AP_MAX_LINKS; i++) {
481 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 670 wl1271_tx_reset_link_queues(wl, i);
482 ieee80211_tx_status(wl->hw, skb); 671 wl->links[i].allocated_blks = 0;
672 wl->links[i].prev_freed_blks = 0;
673 }
674
675 wl->last_tx_hlid = 0;
676 } else {
677 for (i = 0; i < NUM_TX_QUEUES; i++) {
678 while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
679 wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
680 skb);
681 info = IEEE80211_SKB_CB(skb);
682 info->status.rates[0].idx = -1;
683 info->status.rates[0].count = 0;
684 ieee80211_tx_status(wl->hw, skb);
685 }
483 } 686 }
484 } 687 }
688
485 wl->tx_queue_count = 0; 689 wl->tx_queue_count = 0;
486 690
487 /* 691 /*
488 * Make sure the driver is at a consistent state, in case this 692 * Make sure the driver is at a consistent state, in case this
489 * function is called from a context other than interface removal. 693 * function is called from a context other than interface removal.
490 */ 694 */
491 handle_tx_low_watermark(wl); 695 wl1271_handle_tx_low_watermark(wl);
492 696
493 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 697 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
494 if (wl->tx_frames[i] != NULL) { 698 if (wl->tx_frames[i] == NULL)
495 skb = wl->tx_frames[i]; 699 continue;
496 wl1271_free_tx_id(wl, i); 700
497 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 701 skb = wl->tx_frames[i];
498 ieee80211_tx_status(wl->hw, skb); 702 wl1271_free_tx_id(wl, i);
703 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
704
705 /* Remove private headers before passing the skb to mac80211 */
706 info = IEEE80211_SKB_CB(skb);
707 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
708 if (info->control.hw_key &&
709 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
710 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
711 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data,
712 hdrlen);
713 skb_pull(skb, WL1271_TKIP_IV_SPACE);
499 } 714 }
715
716 info->status.rates[0].idx = -1;
717 info->status.rates[0].count = 0;
718
719 ieee80211_tx_status(wl->hw, skb);
720 }
500} 721}
501 722
502#define WL1271_TX_FLUSH_TIMEOUT 500000 723#define WL1271_TX_FLUSH_TIMEOUT 500000
@@ -509,8 +730,8 @@ void wl1271_tx_flush(struct wl1271 *wl)
509 730
510 while (!time_after(jiffies, timeout)) { 731 while (!time_after(jiffies, timeout)) {
511 mutex_lock(&wl->mutex); 732 mutex_lock(&wl->mutex);
512 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d", 733 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
513 wl->tx_frames_cnt); 734 wl->tx_frames_cnt, wl->tx_queue_count);
514 if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) { 735 if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
515 mutex_unlock(&wl->mutex); 736 mutex_unlock(&wl->mutex);
516 return; 737 return;
@@ -521,3 +742,21 @@ void wl1271_tx_flush(struct wl1271 *wl)
521 742
522 wl1271_warning("Unable to flush all TX buffers, timed out."); 743 wl1271_warning("Unable to flush all TX buffers, timed out.");
523} 744}
745
746u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
747{
748 int i;
749 u32 rate = 0;
750
751 if (!wl->basic_rate_set) {
752 WARN_ON(1);
753 wl->basic_rate_set = wl->conf.tx.basic_rate;
754 }
755
756 for (i = 0; !rate; i++) {
757 if ((wl->basic_rate_set >> i) & 0x1)
758 rate = 1 << i;
759 }
760
761 return rate;
762}
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 903e5dc69b7a..02f07fa66e82 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -29,6 +29,7 @@
29#define TX_HW_BLOCK_SIZE 252 29#define TX_HW_BLOCK_SIZE 252
30 30
31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
32#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
32/* The chipset reference driver states, that the "aid" value 1 33/* The chipset reference driver states, that the "aid" value 1
33 * is for infra-BSS, but is still always used */ 34 * is for infra-BSS, but is still always used */
34#define TX_HW_DEFAULT_AID 1 35#define TX_HW_DEFAULT_AID 1
@@ -52,8 +53,6 @@
52#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf 53#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf
53 54
54#define WL1271_TX_ALIGN_TO 4 55#define WL1271_TX_ALIGN_TO 4
55#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \
56 ~(WL1271_TX_ALIGN_TO - 1))
57#define WL1271_TKIP_IV_SPACE 4 56#define WL1271_TKIP_IV_SPACE 4
58 57
59struct wl1271_tx_hw_descr { 58struct wl1271_tx_hw_descr {
@@ -77,8 +76,12 @@ struct wl1271_tx_hw_descr {
77 u8 id; 76 u8 id;
78 /* The packet TID value (as User-Priority) */ 77 /* The packet TID value (as User-Priority) */
79 u8 tid; 78 u8 tid;
80 /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ 79 union {
81 u8 aid; 80 /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
81 u8 aid;
82 /* AP - host link ID (HLID) */
83 u8 hlid;
84 } __packed;
82 u8 reserved; 85 u8 reserved;
83} __packed; 86} __packed;
84 87
@@ -146,5 +149,9 @@ void wl1271_tx_reset(struct wl1271 *wl);
146void wl1271_tx_flush(struct wl1271 *wl); 149void wl1271_tx_flush(struct wl1271 *wl);
147u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 150u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
148u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); 151u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
152u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
153u8 wl1271_tx_get_hlid(struct sk_buff *skb);
154void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
155void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
149 156
150#endif 157#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 9050dd9b62d2..86be83e25ec5 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -38,6 +38,13 @@
38#define DRIVER_NAME "wl1271" 38#define DRIVER_NAME "wl1271"
39#define DRIVER_PREFIX DRIVER_NAME ": " 39#define DRIVER_PREFIX DRIVER_NAME ": "
40 40
41/*
42 * FW versions support BA 11n
43 * versions marks x.x.x.50-60.x
44 */
45#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
46#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
47
41enum { 48enum {
42 DEBUG_NONE = 0, 49 DEBUG_NONE = 0,
43 DEBUG_IRQ = BIT(0), 50 DEBUG_IRQ = BIT(0),
@@ -57,6 +64,8 @@ enum {
57 DEBUG_SDIO = BIT(14), 64 DEBUG_SDIO = BIT(14),
58 DEBUG_FILTERS = BIT(15), 65 DEBUG_FILTERS = BIT(15),
59 DEBUG_ADHOC = BIT(16), 66 DEBUG_ADHOC = BIT(16),
67 DEBUG_AP = BIT(17),
68 DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
60 DEBUG_ALL = ~0, 69 DEBUG_ALL = ~0,
61}; 70};
62 71
@@ -103,17 +112,28 @@ extern u32 wl12xx_debug_level;
103 true); \ 112 true); \
104 } while (0) 113 } while (0)
105 114
106#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ 115#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \
107 CFG_BSSID_FILTER_EN | \ 116 CFG_BSSID_FILTER_EN | \
108 CFG_MC_FILTER_EN) 117 CFG_MC_FILTER_EN)
109 118
110#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ 119#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
111 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ 120 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
112 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ 121 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
113 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) 122 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
114 123
115#define WL1271_FW_NAME "wl1271-fw.bin" 124#define WL1271_DEFAULT_AP_RX_CONFIG 0
116#define WL1271_NVS_NAME "wl1271-nvs.bin" 125
126#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
127 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
128 CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
129 CFG_RX_ASSOC_EN)
130
131
132
133#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
134#define WL1271_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
135
136#define WL1271_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
117 137
118#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) 138#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
119#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff)) 139#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
@@ -129,6 +149,25 @@ extern u32 wl12xx_debug_level;
129#define WL1271_DEFAULT_BEACON_INT 100 149#define WL1271_DEFAULT_BEACON_INT 100
130#define WL1271_DEFAULT_DTIM_PERIOD 1 150#define WL1271_DEFAULT_DTIM_PERIOD 1
131 151
152#define WL1271_AP_GLOBAL_HLID 0
153#define WL1271_AP_BROADCAST_HLID 1
154#define WL1271_AP_STA_HLID_START 2
155
156/*
157 * When in AP-mode, we allow (at least) this number of mem-blocks
158 * to be transmitted to FW for a STA in PS-mode. Only when packets are
159 * present in the FW buffers it will wake the sleeping STA. We want to put
160 * enough packets for the driver to transmit all of its buffered data before
161 * the STA goes to sleep again. But we don't want to take too much mem-blocks
162 * as it might hurt the throughput of active STAs.
163 * The number of blocks (18) is enough for 2 large packets.
164 */
165#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
166
167#define WL1271_AP_BSS_INDEX 0
168#define WL1271_AP_DEF_INACTIV_SEC 300
169#define WL1271_AP_DEF_BEACON_EXP 20
170
132#define ACX_TX_DESCRIPTORS 32 171#define ACX_TX_DESCRIPTORS 32
133 172
134#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) 173#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -161,10 +200,13 @@ struct wl1271_partition_set {
161 200
162struct wl1271; 201struct wl1271;
163 202
203#define WL12XX_NUM_FW_VER 5
204
164/* FIXME: I'm not sure about this structure name */ 205/* FIXME: I'm not sure about this structure name */
165struct wl1271_chip { 206struct wl1271_chip {
166 u32 id; 207 u32 id;
167 char fw_ver[21]; 208 char fw_ver_str[ETHTOOL_BUSINFO_LEN];
209 unsigned int fw_ver[WL12XX_NUM_FW_VER];
168}; 210};
169 211
170struct wl1271_stats { 212struct wl1271_stats {
@@ -178,8 +220,13 @@ struct wl1271_stats {
178#define NUM_TX_QUEUES 4 220#define NUM_TX_QUEUES 4
179#define NUM_RX_PKT_DESC 8 221#define NUM_RX_PKT_DESC 8
180 222
181/* FW status registers */ 223#define AP_MAX_STATIONS 5
182struct wl1271_fw_status { 224
225/* Broadcast and Global links + links to stations */
226#define AP_MAX_LINKS (AP_MAX_STATIONS + 2)
227
228/* FW status registers common for AP/STA */
229struct wl1271_fw_common_status {
183 __le32 intr; 230 __le32 intr;
184 u8 fw_rx_counter; 231 u8 fw_rx_counter;
185 u8 drv_rx_counter; 232 u8 drv_rx_counter;
@@ -188,9 +235,43 @@ struct wl1271_fw_status {
188 __le32 rx_pkt_descs[NUM_RX_PKT_DESC]; 235 __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
189 __le32 tx_released_blks[NUM_TX_QUEUES]; 236 __le32 tx_released_blks[NUM_TX_QUEUES];
190 __le32 fw_localtime; 237 __le32 fw_localtime;
191 __le32 padding[2];
192} __packed; 238} __packed;
193 239
240/* FW status registers for AP */
241struct wl1271_fw_ap_status {
242 struct wl1271_fw_common_status common;
243
244 /* Next fields valid only in AP FW */
245
246 /*
247 * A bitmap (where each bit represents a single HLID)
248 * to indicate if the station is in PS mode.
249 */
250 __le32 link_ps_bitmap;
251
252 /* Number of freed MBs per HLID */
253 u8 tx_lnk_free_blks[AP_MAX_LINKS];
254 u8 padding_1[1];
255} __packed;
256
257/* FW status registers for STA */
258struct wl1271_fw_sta_status {
259 struct wl1271_fw_common_status common;
260
261 u8 tx_total;
262 u8 reserved1;
263 __le16 reserved2;
264} __packed;
265
266struct wl1271_fw_full_status {
267 union {
268 struct wl1271_fw_common_status common;
269 struct wl1271_fw_sta_status sta;
270 struct wl1271_fw_ap_status ap;
271 };
272} __packed;
273
274
194struct wl1271_rx_mem_pool_addr { 275struct wl1271_rx_mem_pool_addr {
195 u32 addr; 276 u32 addr;
196 u32 addr_extra; 277 u32 addr_extra;
@@ -218,6 +299,48 @@ struct wl1271_if_operations {
218 void (*disable_irq)(struct wl1271 *wl); 299 void (*disable_irq)(struct wl1271 *wl);
219}; 300};
220 301
302#define MAX_NUM_KEYS 14
303#define MAX_KEY_SIZE 32
304
305struct wl1271_ap_key {
306 u8 id;
307 u8 key_type;
308 u8 key_size;
309 u8 key[MAX_KEY_SIZE];
310 u8 hlid;
311 u32 tx_seq_32;
312 u16 tx_seq_16;
313};
314
315enum wl12xx_flags {
316 WL1271_FLAG_STA_ASSOCIATED,
317 WL1271_FLAG_JOINED,
318 WL1271_FLAG_GPIO_POWER,
319 WL1271_FLAG_TX_QUEUE_STOPPED,
320 WL1271_FLAG_TX_PENDING,
321 WL1271_FLAG_IN_ELP,
322 WL1271_FLAG_PSM,
323 WL1271_FLAG_PSM_REQUESTED,
324 WL1271_FLAG_IRQ_RUNNING,
325 WL1271_FLAG_IDLE,
326 WL1271_FLAG_IDLE_REQUESTED,
327 WL1271_FLAG_PSPOLL_FAILURE,
328 WL1271_FLAG_STA_STATE_SENT,
329 WL1271_FLAG_FW_TX_BUSY,
330 WL1271_FLAG_AP_STARTED
331};
332
333struct wl1271_link {
334 /* AP-mode - TX queue per AC in link */
335 struct sk_buff_head tx_queue[NUM_TX_QUEUES];
336
337 /* accounting for allocated / available TX blocks in FW */
338 u8 allocated_blks;
339 u8 prev_freed_blks;
340
341 u8 addr[ETH_ALEN];
342};
343
221struct wl1271 { 344struct wl1271 {
222 struct platform_device *plat_dev; 345 struct platform_device *plat_dev;
223 struct ieee80211_hw *hw; 346 struct ieee80211_hw *hw;
@@ -236,21 +359,6 @@ struct wl1271 {
236 enum wl1271_state state; 359 enum wl1271_state state;
237 struct mutex mutex; 360 struct mutex mutex;
238 361
239#define WL1271_FLAG_STA_RATES_CHANGED (0)
240#define WL1271_FLAG_STA_ASSOCIATED (1)
241#define WL1271_FLAG_JOINED (2)
242#define WL1271_FLAG_GPIO_POWER (3)
243#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
244#define WL1271_FLAG_IN_ELP (5)
245#define WL1271_FLAG_PSM (6)
246#define WL1271_FLAG_PSM_REQUESTED (7)
247#define WL1271_FLAG_IRQ_PENDING (8)
248#define WL1271_FLAG_IRQ_RUNNING (9)
249#define WL1271_FLAG_IDLE (10)
250#define WL1271_FLAG_IDLE_REQUESTED (11)
251#define WL1271_FLAG_PSPOLL_FAILURE (12)
252#define WL1271_FLAG_STA_STATE_SENT (13)
253#define WL1271_FLAG_FW_TX_BUSY (14)
254 unsigned long flags; 362 unsigned long flags;
255 363
256 struct wl1271_partition_set part; 364 struct wl1271_partition_set part;
@@ -262,6 +370,7 @@ struct wl1271 {
262 370
263 u8 *fw; 371 u8 *fw;
264 size_t fw_len; 372 size_t fw_len;
373 u8 fw_bss_type;
265 struct wl1271_nvs_file *nvs; 374 struct wl1271_nvs_file *nvs;
266 size_t nvs_len; 375 size_t nvs_len;
267 376
@@ -295,6 +404,12 @@ struct wl1271 {
295 struct sk_buff_head tx_queue[NUM_TX_QUEUES]; 404 struct sk_buff_head tx_queue[NUM_TX_QUEUES];
296 int tx_queue_count; 405 int tx_queue_count;
297 406
407 /* Frames received, not handled yet by mac80211 */
408 struct sk_buff_head deferred_rx_queue;
409
410 /* Frames sent, not returned yet to mac80211 */
411 struct sk_buff_head deferred_tx_queue;
412
298 struct work_struct tx_work; 413 struct work_struct tx_work;
299 414
300 /* Pending TX frames */ 415 /* Pending TX frames */
@@ -315,8 +430,8 @@ struct wl1271 {
315 /* Intermediate buffer, used for packet aggregation */ 430 /* Intermediate buffer, used for packet aggregation */
316 u8 *aggr_buf; 431 u8 *aggr_buf;
317 432
318 /* The target interrupt mask */ 433 /* Network stack work */
319 struct work_struct irq_work; 434 struct work_struct netstack_work;
320 435
321 /* Hardware recovery work */ 436 /* Hardware recovery work */
322 struct work_struct recovery_work; 437 struct work_struct recovery_work;
@@ -343,7 +458,6 @@ struct wl1271 {
343 * bits 16-23 - 802.11n MCS index mask 458 * bits 16-23 - 802.11n MCS index mask
344 * support only 1 stream, thus only 8 bits for the MCS rates (0-7). 459 * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
345 */ 460 */
346 u32 sta_rate_set;
347 u32 basic_rate_set; 461 u32 basic_rate_set;
348 u32 basic_rate; 462 u32 basic_rate;
349 u32 rate_set; 463 u32 rate_set;
@@ -378,13 +492,12 @@ struct wl1271 {
378 int last_rssi_event; 492 int last_rssi_event;
379 493
380 struct wl1271_stats stats; 494 struct wl1271_stats stats;
381 struct dentry *rootdir;
382 495
383 __le32 buffer_32; 496 __le32 buffer_32;
384 u32 buffer_cmd; 497 u32 buffer_cmd;
385 u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; 498 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
386 499
387 struct wl1271_fw_status *fw_status; 500 struct wl1271_fw_full_status *fw_status;
388 struct wl1271_tx_hw_res_if *tx_res_if; 501 struct wl1271_tx_hw_res_if *tx_res_if;
389 502
390 struct ieee80211_vif *vif; 503 struct ieee80211_vif *vif;
@@ -400,6 +513,41 @@ struct wl1271 {
400 513
401 /* Most recently reported noise in dBm */ 514 /* Most recently reported noise in dBm */
402 s8 noise; 515 s8 noise;
516
517 /* map for HLIDs of associated stations - when operating in AP mode */
518 unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
519
520 /* recoreded keys for AP-mode - set here before AP startup */
521 struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
522
523 /* bands supported by this instance of wl12xx */
524 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
525
526 /* RX BA constraint value */
527 bool ba_support;
528 u8 ba_rx_bitmap;
529
530 /*
531 * AP-mode - links indexed by HLID. The global and broadcast links
532 * are always active.
533 */
534 struct wl1271_link links[AP_MAX_LINKS];
535
536 /* the hlid of the link where the last transmitted skb came from */
537 int last_tx_hlid;
538
539 /* AP-mode - a bitmap of links currently in PS mode according to FW */
540 u32 ap_fw_ps_map;
541
542 /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
543 unsigned long ap_ps_map;
544
545 /* Quirks of specific hardware revisions */
546 unsigned int quirks;
547};
548
549struct wl1271_station {
550 u8 hlid;
403}; 551};
404 552
405int wl1271_plt_start(struct wl1271 *wl); 553int wl1271_plt_start(struct wl1271 *wl);
@@ -414,6 +562,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
414#define WL1271_TX_QUEUE_LOW_WATERMARK 10 562#define WL1271_TX_QUEUE_LOW_WATERMARK 10
415#define WL1271_TX_QUEUE_HIGH_WATERMARK 25 563#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
416 564
565#define WL1271_DEFERRED_QUEUE_LIMIT 64
566
417/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power 567/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
418 on in case is has been shut down shortly before */ 568 on in case is has been shut down shortly before */
419#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */ 569#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
@@ -423,4 +573,9 @@ int wl1271_plt_stop(struct wl1271 *wl);
423#define HW_BG_RATES_MASK 0xffff 573#define HW_BG_RATES_MASK 0xffff
424#define HW_HT_RATES_OFFSET 16 574#define HW_HT_RATES_OFFSET 16
425 575
576/* Quirks */
577
578/* Each RX/TX transaction requires an end-of-transaction transfer */
579#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
580
426#endif 581#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index be21032f4dc1..18fe542360f2 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -55,7 +55,6 @@
55 55
56/* This really should be 8, but not for our firmware */ 56/* This really should be 8, but not for our firmware */
57#define MAX_SUPPORTED_RATES 32 57#define MAX_SUPPORTED_RATES 32
58#define COUNTRY_STRING_LEN 3
59#define MAX_COUNTRY_TRIPLETS 32 58#define MAX_COUNTRY_TRIPLETS 32
60 59
61/* Headers */ 60/* Headers */
@@ -99,7 +98,7 @@ struct country_triplet {
99 98
100struct wl12xx_ie_country { 99struct wl12xx_ie_country {
101 struct wl12xx_ie_header header; 100 struct wl12xx_ie_header header;
102 u8 country_string[COUNTRY_STRING_LEN]; 101 u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
103 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; 102 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
104} __packed; 103} __packed;
105 104
@@ -138,13 +137,13 @@ struct wl12xx_arp_rsp_template {
138 struct ieee80211_hdr_3addr hdr; 137 struct ieee80211_hdr_3addr hdr;
139 138
140 u8 llc_hdr[sizeof(rfc1042_header)]; 139 u8 llc_hdr[sizeof(rfc1042_header)];
141 u16 llc_type; 140 __be16 llc_type;
142 141
143 struct arphdr arp_hdr; 142 struct arphdr arp_hdr;
144 u8 sender_hw[ETH_ALEN]; 143 u8 sender_hw[ETH_ALEN];
145 u32 sender_ip; 144 __be32 sender_ip;
146 u8 target_hw[ETH_ALEN]; 145 u8 target_hw[ETH_ALEN];
147 u32 target_ip; 146 __be32 target_ip;
148} __packed; 147} __packed;
149 148
150 149
@@ -160,4 +159,9 @@ struct wl12xx_probe_resp_template {
160 struct wl12xx_ie_country country; 159 struct wl12xx_ie_country country;
161} __packed; 160} __packed;
162 161
162struct wl12xx_disconn_template {
163 struct ieee80211_header header;
164 __le16 disconn_reason;
165} __packed;
166
163#endif 167#endif
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 6a9b66051cf7..a73a305d3cba 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -108,25 +108,17 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
108{ 108{
109 int r; 109 int r;
110 int i; 110 int i;
111 zd_addr_t *a16; 111 zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2];
112 u16 *v16; 112 u16 v16[USB_MAX_IOREAD32_COUNT * 2];
113 unsigned int count16; 113 unsigned int count16;
114 114
115 if (count > USB_MAX_IOREAD32_COUNT) 115 if (count > USB_MAX_IOREAD32_COUNT)
116 return -EINVAL; 116 return -EINVAL;
117 117
118 /* Allocate a single memory block for values and addresses. */ 118 /* Use stack for values and addresses. */
119 count16 = 2*count; 119 count16 = 2 * count;
120 /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */ 120 BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16));
121 a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), 121 BUG_ON(count16 * sizeof(u16) > sizeof(v16));
122 GFP_KERNEL);
123 if (!a16) {
124 dev_dbg_f(zd_chip_dev(chip),
125 "error ENOMEM in allocation of a16\n");
126 r = -ENOMEM;
127 goto out;
128 }
129 v16 = (u16 *)(a16 + count16);
130 122
131 for (i = 0; i < count; i++) { 123 for (i = 0; i < count; i++) {
132 int j = 2*i; 124 int j = 2*i;
@@ -139,7 +131,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
139 if (r) { 131 if (r) {
140 dev_dbg_f(zd_chip_dev(chip), 132 dev_dbg_f(zd_chip_dev(chip),
141 "error: zd_ioread16v_locked. Error number %d\n", r); 133 "error: zd_ioread16v_locked. Error number %d\n", r);
142 goto out; 134 return r;
143 } 135 }
144 136
145 for (i = 0; i < count; i++) { 137 for (i = 0; i < count; i++) {
@@ -147,18 +139,19 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
147 values[i] = (v16[j] << 16) | v16[j+1]; 139 values[i] = (v16[j] << 16) | v16[j+1];
148 } 140 }
149 141
150out: 142 return 0;
151 kfree((void *)a16);
152 return r;
153} 143}
154 144
155int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, 145static int _zd_iowrite32v_async_locked(struct zd_chip *chip,
156 unsigned int count) 146 const struct zd_ioreq32 *ioreqs,
147 unsigned int count)
157{ 148{
158 int i, j, r; 149 int i, j, r;
159 struct zd_ioreq16 *ioreqs16; 150 struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2];
160 unsigned int count16; 151 unsigned int count16;
161 152
153 /* Use stack for values and addresses. */
154
162 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 155 ZD_ASSERT(mutex_is_locked(&chip->mutex));
163 156
164 if (count == 0) 157 if (count == 0)
@@ -166,15 +159,8 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
166 if (count > USB_MAX_IOWRITE32_COUNT) 159 if (count > USB_MAX_IOWRITE32_COUNT)
167 return -EINVAL; 160 return -EINVAL;
168 161
169 /* Allocate a single memory block for values and addresses. */ 162 count16 = 2 * count;
170 count16 = 2*count; 163 BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16));
171 ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL);
172 if (!ioreqs16) {
173 r = -ENOMEM;
174 dev_dbg_f(zd_chip_dev(chip),
175 "error %d in ioreqs16 allocation\n", r);
176 goto out;
177 }
178 164
179 for (i = 0; i < count; i++) { 165 for (i = 0; i < count; i++) {
180 j = 2*i; 166 j = 2*i;
@@ -185,18 +171,30 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
185 ioreqs16[j+1].addr = ioreqs[i].addr; 171 ioreqs16[j+1].addr = ioreqs[i].addr;
186 } 172 }
187 173
188 r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16); 174 r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16);
189#ifdef DEBUG 175#ifdef DEBUG
190 if (r) { 176 if (r) {
191 dev_dbg_f(zd_chip_dev(chip), 177 dev_dbg_f(zd_chip_dev(chip),
192 "error %d in zd_usb_write16v\n", r); 178 "error %d in zd_usb_write16v\n", r);
193 } 179 }
194#endif /* DEBUG */ 180#endif /* DEBUG */
195out:
196 kfree(ioreqs16);
197 return r; 181 return r;
198} 182}
199 183
184int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
185 unsigned int count)
186{
187 int r;
188
189 zd_usb_iowrite16v_async_start(&chip->usb);
190 r = _zd_iowrite32v_async_locked(chip, ioreqs, count);
191 if (r) {
192 zd_usb_iowrite16v_async_end(&chip->usb, 0);
193 return r;
194 }
195 return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
196}
197
200int zd_iowrite16a_locked(struct zd_chip *chip, 198int zd_iowrite16a_locked(struct zd_chip *chip,
201 const struct zd_ioreq16 *ioreqs, unsigned int count) 199 const struct zd_ioreq16 *ioreqs, unsigned int count)
202{ 200{
@@ -204,6 +202,8 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
204 unsigned int i, j, t, max; 202 unsigned int i, j, t, max;
205 203
206 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 204 ZD_ASSERT(mutex_is_locked(&chip->mutex));
205 zd_usb_iowrite16v_async_start(&chip->usb);
206
207 for (i = 0; i < count; i += j + t) { 207 for (i = 0; i < count; i += j + t) {
208 t = 0; 208 t = 0;
209 max = count-i; 209 max = count-i;
@@ -216,8 +216,9 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
216 } 216 }
217 } 217 }
218 218
219 r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j); 219 r = zd_usb_iowrite16v_async(&chip->usb, &ioreqs[i], j);
220 if (r) { 220 if (r) {
221 zd_usb_iowrite16v_async_end(&chip->usb, 0);
221 dev_dbg_f(zd_chip_dev(chip), 222 dev_dbg_f(zd_chip_dev(chip),
222 "error zd_usb_iowrite16v. Error number %d\n", 223 "error zd_usb_iowrite16v. Error number %d\n",
223 r); 224 r);
@@ -225,7 +226,7 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
225 } 226 }
226 } 227 }
227 228
228 return 0; 229 return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
229} 230}
230 231
231/* Writes a variable number of 32 bit registers. The functions will split 232/* Writes a variable number of 32 bit registers. The functions will split
@@ -238,6 +239,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
238 int r; 239 int r;
239 unsigned int i, j, t, max; 240 unsigned int i, j, t, max;
240 241
242 zd_usb_iowrite16v_async_start(&chip->usb);
243
241 for (i = 0; i < count; i += j + t) { 244 for (i = 0; i < count; i += j + t) {
242 t = 0; 245 t = 0;
243 max = count-i; 246 max = count-i;
@@ -250,8 +253,9 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
250 } 253 }
251 } 254 }
252 255
253 r = _zd_iowrite32v_locked(chip, &ioreqs[i], j); 256 r = _zd_iowrite32v_async_locked(chip, &ioreqs[i], j);
254 if (r) { 257 if (r) {
258 zd_usb_iowrite16v_async_end(&chip->usb, 0);
255 dev_dbg_f(zd_chip_dev(chip), 259 dev_dbg_f(zd_chip_dev(chip),
256 "error _zd_iowrite32v_locked." 260 "error _zd_iowrite32v_locked."
257 " Error number %d\n", r); 261 " Error number %d\n", r);
@@ -259,7 +263,7 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
259 } 263 }
260 } 264 }
261 265
262 return 0; 266 return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
263} 267}
264 268
265int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value) 269int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
@@ -370,16 +374,12 @@ error:
370 return r; 374 return r;
371} 375}
372 376
373/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and 377static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr,
374 * CR_MAC_ADDR_P2 must be overwritten 378 const struct zd_ioreq32 *in_reqs,
375 */ 379 const char *type)
376int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
377{ 380{
378 int r; 381 int r;
379 struct zd_ioreq32 reqs[2] = { 382 struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
380 [0] = { .addr = CR_MAC_ADDR_P1 },
381 [1] = { .addr = CR_MAC_ADDR_P2 },
382 };
383 383
384 if (mac_addr) { 384 if (mac_addr) {
385 reqs[0].value = (mac_addr[3] << 24) 385 reqs[0].value = (mac_addr[3] << 24)
@@ -388,9 +388,9 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
388 | mac_addr[0]; 388 | mac_addr[0];
389 reqs[1].value = (mac_addr[5] << 8) 389 reqs[1].value = (mac_addr[5] << 8)
390 | mac_addr[4]; 390 | mac_addr[4];
391 dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr); 391 dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr);
392 } else { 392 } else {
393 dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n"); 393 dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type);
394 } 394 }
395 395
396 mutex_lock(&chip->mutex); 396 mutex_lock(&chip->mutex);
@@ -399,6 +399,29 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
399 return r; 399 return r;
400} 400}
401 401
402/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
403 * CR_MAC_ADDR_P2 must be overwritten
404 */
405int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
406{
407 static const struct zd_ioreq32 reqs[2] = {
408 [0] = { .addr = CR_MAC_ADDR_P1 },
409 [1] = { .addr = CR_MAC_ADDR_P2 },
410 };
411
412 return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
413}
414
415int zd_write_bssid(struct zd_chip *chip, const u8 *bssid)
416{
417 static const struct zd_ioreq32 reqs[2] = {
418 [0] = { .addr = CR_BSSID_P1 },
419 [1] = { .addr = CR_BSSID_P2 },
420 };
421
422 return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
423}
424
402int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain) 425int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
403{ 426{
404 int r; 427 int r;
@@ -849,11 +872,12 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
849static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) 872static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
850{ 873{
851 struct zd_ioreq32 reqs[3]; 874 struct zd_ioreq32 reqs[3];
875 u16 b_interval = s->beacon_interval & 0xffff;
852 876
853 if (s->beacon_interval <= 5) 877 if (b_interval <= 5)
854 s->beacon_interval = 5; 878 b_interval = 5;
855 if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval) 879 if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval)
856 s->pre_tbtt = s->beacon_interval - 1; 880 s->pre_tbtt = b_interval - 1;
857 if (s->atim_wnd_period >= s->pre_tbtt) 881 if (s->atim_wnd_period >= s->pre_tbtt)
858 s->atim_wnd_period = s->pre_tbtt - 1; 882 s->atim_wnd_period = s->pre_tbtt - 1;
859 883
@@ -862,31 +886,57 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
862 reqs[1].addr = CR_PRE_TBTT; 886 reqs[1].addr = CR_PRE_TBTT;
863 reqs[1].value = s->pre_tbtt; 887 reqs[1].value = s->pre_tbtt;
864 reqs[2].addr = CR_BCN_INTERVAL; 888 reqs[2].addr = CR_BCN_INTERVAL;
865 reqs[2].value = s->beacon_interval; 889 reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
866 890
867 return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); 891 return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
868} 892}
869 893
870 894
871static int set_beacon_interval(struct zd_chip *chip, u32 interval) 895static int set_beacon_interval(struct zd_chip *chip, u16 interval,
896 u8 dtim_period, int type)
872{ 897{
873 int r; 898 int r;
874 struct aw_pt_bi s; 899 struct aw_pt_bi s;
900 u32 b_interval, mode_flag;
875 901
876 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 902 ZD_ASSERT(mutex_is_locked(&chip->mutex));
903
904 if (interval > 0) {
905 switch (type) {
906 case NL80211_IFTYPE_ADHOC:
907 case NL80211_IFTYPE_MESH_POINT:
908 mode_flag = BCN_MODE_IBSS;
909 break;
910 case NL80211_IFTYPE_AP:
911 mode_flag = BCN_MODE_AP;
912 break;
913 default:
914 mode_flag = 0;
915 break;
916 }
917 } else {
918 dtim_period = 0;
919 mode_flag = 0;
920 }
921
922 b_interval = mode_flag | (dtim_period << 16) | interval;
923
924 r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL);
925 if (r)
926 return r;
877 r = get_aw_pt_bi(chip, &s); 927 r = get_aw_pt_bi(chip, &s);
878 if (r) 928 if (r)
879 return r; 929 return r;
880 s.beacon_interval = interval;
881 return set_aw_pt_bi(chip, &s); 930 return set_aw_pt_bi(chip, &s);
882} 931}
883 932
884int zd_set_beacon_interval(struct zd_chip *chip, u32 interval) 933int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
934 int type)
885{ 935{
886 int r; 936 int r;
887 937
888 mutex_lock(&chip->mutex); 938 mutex_lock(&chip->mutex);
889 r = set_beacon_interval(chip, interval); 939 r = set_beacon_interval(chip, interval, dtim_period, type);
890 mutex_unlock(&chip->mutex); 940 mutex_unlock(&chip->mutex);
891 return r; 941 return r;
892} 942}
@@ -905,7 +955,7 @@ static int hw_init(struct zd_chip *chip)
905 if (r) 955 if (r)
906 return r; 956 return r;
907 957
908 return set_beacon_interval(chip, 100); 958 return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED);
909} 959}
910 960
911static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset) 961static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
@@ -1407,6 +1457,9 @@ void zd_chip_disable_int(struct zd_chip *chip)
1407 mutex_lock(&chip->mutex); 1457 mutex_lock(&chip->mutex);
1408 zd_usb_disable_int(&chip->usb); 1458 zd_usb_disable_int(&chip->usb);
1409 mutex_unlock(&chip->mutex); 1459 mutex_unlock(&chip->mutex);
1460
1461 /* cancel pending interrupt work */
1462 cancel_work_sync(&zd_chip_to_mac(chip)->process_intr);
1410} 1463}
1411 1464
1412int zd_chip_enable_rxtx(struct zd_chip *chip) 1465int zd_chip_enable_rxtx(struct zd_chip *chip)
@@ -1416,6 +1469,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
1416 mutex_lock(&chip->mutex); 1469 mutex_lock(&chip->mutex);
1417 zd_usb_enable_tx(&chip->usb); 1470 zd_usb_enable_tx(&chip->usb);
1418 r = zd_usb_enable_rx(&chip->usb); 1471 r = zd_usb_enable_rx(&chip->usb);
1472 zd_tx_watchdog_enable(&chip->usb);
1419 mutex_unlock(&chip->mutex); 1473 mutex_unlock(&chip->mutex);
1420 return r; 1474 return r;
1421} 1475}
@@ -1423,6 +1477,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
1423void zd_chip_disable_rxtx(struct zd_chip *chip) 1477void zd_chip_disable_rxtx(struct zd_chip *chip)
1424{ 1478{
1425 mutex_lock(&chip->mutex); 1479 mutex_lock(&chip->mutex);
1480 zd_tx_watchdog_disable(&chip->usb);
1426 zd_usb_disable_rx(&chip->usb); 1481 zd_usb_disable_rx(&chip->usb);
1427 zd_usb_disable_tx(&chip->usb); 1482 zd_usb_disable_tx(&chip->usb);
1428 mutex_unlock(&chip->mutex); 1483 mutex_unlock(&chip->mutex);
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index f8bbf7d302ae..14e4402a6111 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -546,6 +546,7 @@ enum {
546#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \ 546#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
547 RX_FILTER_CFEND | RX_FILTER_CFACK) 547 RX_FILTER_CFEND | RX_FILTER_CFACK)
548 548
549#define BCN_MODE_AP 0x1000000
549#define BCN_MODE_IBSS 0x2000000 550#define BCN_MODE_IBSS 0x2000000
550 551
551/* Monitor mode sets filter to 0xfffff */ 552/* Monitor mode sets filter to 0xfffff */
@@ -881,6 +882,7 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
881u8 zd_chip_get_channel(struct zd_chip *chip); 882u8 zd_chip_get_channel(struct zd_chip *chip);
882int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain); 883int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
883int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr); 884int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
885int zd_write_bssid(struct zd_chip *chip, const u8 *bssid);
884int zd_chip_switch_radio_on(struct zd_chip *chip); 886int zd_chip_switch_radio_on(struct zd_chip *chip);
885int zd_chip_switch_radio_off(struct zd_chip *chip); 887int zd_chip_switch_radio_off(struct zd_chip *chip);
886int zd_chip_enable_int(struct zd_chip *chip); 888int zd_chip_enable_int(struct zd_chip *chip);
@@ -920,7 +922,8 @@ enum led_status {
920 922
921int zd_chip_control_leds(struct zd_chip *chip, enum led_status status); 923int zd_chip_control_leds(struct zd_chip *chip, enum led_status status);
922 924
923int zd_set_beacon_interval(struct zd_chip *chip, u32 interval); 925int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
926 int type);
924 927
925static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval) 928static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
926{ 929{
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 6ac597ffd3b9..5463ca9ebc01 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -45,7 +45,7 @@ typedef u16 __nocast zd_addr_t;
45#ifdef DEBUG 45#ifdef DEBUG
46# define ZD_ASSERT(x) \ 46# define ZD_ASSERT(x) \
47do { \ 47do { \
48 if (!(x)) { \ 48 if (unlikely(!(x))) { \
49 pr_debug("%s:%d ASSERT %s VIOLATED!\n", \ 49 pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
50 __FILE__, __LINE__, __stringify(x)); \ 50 __FILE__, __LINE__, __stringify(x)); \
51 dump_stack(); \ 51 dump_stack(); \
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6107304cb94c..5037c8b2b415 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -138,6 +138,12 @@ static const struct ieee80211_channel zd_channels[] = {
138static void housekeeping_init(struct zd_mac *mac); 138static void housekeeping_init(struct zd_mac *mac);
139static void housekeeping_enable(struct zd_mac *mac); 139static void housekeeping_enable(struct zd_mac *mac);
140static void housekeeping_disable(struct zd_mac *mac); 140static void housekeeping_disable(struct zd_mac *mac);
141static void beacon_init(struct zd_mac *mac);
142static void beacon_enable(struct zd_mac *mac);
143static void beacon_disable(struct zd_mac *mac);
144static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
145static int zd_mac_config_beacon(struct ieee80211_hw *hw,
146 struct sk_buff *beacon);
141 147
142static int zd_reg2alpha2(u8 regdomain, char *alpha2) 148static int zd_reg2alpha2(u8 regdomain, char *alpha2)
143{ 149{
@@ -231,6 +237,26 @@ static int set_rx_filter(struct zd_mac *mac)
231 return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter); 237 return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
232} 238}
233 239
240static int set_mac_and_bssid(struct zd_mac *mac)
241{
242 int r;
243
244 if (!mac->vif)
245 return -1;
246
247 r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
248 if (r)
249 return r;
250
251 /* Vendor driver after setting MAC either sets BSSID for AP or
252 * filter for other modes.
253 */
254 if (mac->type != NL80211_IFTYPE_AP)
255 return set_rx_filter(mac);
256 else
257 return zd_write_bssid(&mac->chip, mac->vif->addr);
258}
259
234static int set_mc_hash(struct zd_mac *mac) 260static int set_mc_hash(struct zd_mac *mac)
235{ 261{
236 struct zd_mc_hash hash; 262 struct zd_mc_hash hash;
@@ -238,7 +264,7 @@ static int set_mc_hash(struct zd_mac *mac)
238 return zd_chip_set_multicast_hash(&mac->chip, &hash); 264 return zd_chip_set_multicast_hash(&mac->chip, &hash);
239} 265}
240 266
241static int zd_op_start(struct ieee80211_hw *hw) 267int zd_op_start(struct ieee80211_hw *hw)
242{ 268{
243 struct zd_mac *mac = zd_hw_mac(hw); 269 struct zd_mac *mac = zd_hw_mac(hw);
244 struct zd_chip *chip = &mac->chip; 270 struct zd_chip *chip = &mac->chip;
@@ -275,6 +301,8 @@ static int zd_op_start(struct ieee80211_hw *hw)
275 goto disable_rxtx; 301 goto disable_rxtx;
276 302
277 housekeeping_enable(mac); 303 housekeeping_enable(mac);
304 beacon_enable(mac);
305 set_bit(ZD_DEVICE_RUNNING, &mac->flags);
278 return 0; 306 return 0;
279disable_rxtx: 307disable_rxtx:
280 zd_chip_disable_rxtx(chip); 308 zd_chip_disable_rxtx(chip);
@@ -286,19 +314,22 @@ out:
286 return r; 314 return r;
287} 315}
288 316
289static void zd_op_stop(struct ieee80211_hw *hw) 317void zd_op_stop(struct ieee80211_hw *hw)
290{ 318{
291 struct zd_mac *mac = zd_hw_mac(hw); 319 struct zd_mac *mac = zd_hw_mac(hw);
292 struct zd_chip *chip = &mac->chip; 320 struct zd_chip *chip = &mac->chip;
293 struct sk_buff *skb; 321 struct sk_buff *skb;
294 struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue; 322 struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;
295 323
324 clear_bit(ZD_DEVICE_RUNNING, &mac->flags);
325
296 /* The order here deliberately is a little different from the open() 326 /* The order here deliberately is a little different from the open()
297 * method, since we need to make sure there is no opportunity for RX 327 * method, since we need to make sure there is no opportunity for RX
298 * frames to be processed by mac80211 after we have stopped it. 328 * frames to be processed by mac80211 after we have stopped it.
299 */ 329 */
300 330
301 zd_chip_disable_rxtx(chip); 331 zd_chip_disable_rxtx(chip);
332 beacon_disable(mac);
302 housekeeping_disable(mac); 333 housekeeping_disable(mac);
303 flush_workqueue(zd_workqueue); 334 flush_workqueue(zd_workqueue);
304 335
@@ -311,6 +342,68 @@ static void zd_op_stop(struct ieee80211_hw *hw)
311 dev_kfree_skb_any(skb); 342 dev_kfree_skb_any(skb);
312} 343}
313 344
345int zd_restore_settings(struct zd_mac *mac)
346{
347 struct sk_buff *beacon;
348 struct zd_mc_hash multicast_hash;
349 unsigned int short_preamble;
350 int r, beacon_interval, beacon_period;
351 u8 channel;
352
353 dev_dbg_f(zd_mac_dev(mac), "\n");
354
355 spin_lock_irq(&mac->lock);
356 multicast_hash = mac->multicast_hash;
357 short_preamble = mac->short_preamble;
358 beacon_interval = mac->beacon.interval;
359 beacon_period = mac->beacon.period;
360 channel = mac->channel;
361 spin_unlock_irq(&mac->lock);
362
363 r = set_mac_and_bssid(mac);
364 if (r < 0) {
365 dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
366 return r;
367 }
368
369 r = zd_chip_set_channel(&mac->chip, channel);
370 if (r < 0) {
371 dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
372 r);
373 return r;
374 }
375
376 set_rts_cts(mac, short_preamble);
377
378 r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
379 if (r < 0) {
380 dev_dbg_f(zd_mac_dev(mac),
381 "zd_chip_set_multicast_hash failed, %d\n", r);
382 return r;
383 }
384
385 if (mac->type == NL80211_IFTYPE_MESH_POINT ||
386 mac->type == NL80211_IFTYPE_ADHOC ||
387 mac->type == NL80211_IFTYPE_AP) {
388 if (mac->vif != NULL) {
389 beacon = ieee80211_beacon_get(mac->hw, mac->vif);
390 if (beacon) {
391 zd_mac_config_beacon(mac->hw, beacon);
392 kfree_skb(beacon);
393 }
394 }
395
396 zd_set_beacon_interval(&mac->chip, beacon_interval,
397 beacon_period, mac->type);
398
399 spin_lock_irq(&mac->lock);
400 mac->beacon.last_update = jiffies;
401 spin_unlock_irq(&mac->lock);
402 }
403
404 return 0;
405}
406
314/** 407/**
315 * zd_mac_tx_status - reports tx status of a packet if required 408 * zd_mac_tx_status - reports tx status of a packet if required
316 * @hw - a &struct ieee80211_hw pointer 409 * @hw - a &struct ieee80211_hw pointer
@@ -574,64 +667,120 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
574static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon) 667static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
575{ 668{
576 struct zd_mac *mac = zd_hw_mac(hw); 669 struct zd_mac *mac = zd_hw_mac(hw);
577 int r; 670 int r, ret, num_cmds, req_pos = 0;
578 u32 tmp, j = 0; 671 u32 tmp, j = 0;
579 /* 4 more bytes for tail CRC */ 672 /* 4 more bytes for tail CRC */
580 u32 full_len = beacon->len + 4; 673 u32 full_len = beacon->len + 4;
674 unsigned long end_jiffies, message_jiffies;
675 struct zd_ioreq32 *ioreqs;
581 676
582 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0); 677 /* Alloc memory for full beacon write at once. */
678 num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
679 ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
680 if (!ioreqs)
681 return -ENOMEM;
682
683 mutex_lock(&mac->chip.mutex);
684
685 r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
583 if (r < 0) 686 if (r < 0)
584 return r; 687 goto out;
585 r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp); 688 r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
586 if (r < 0) 689 if (r < 0)
587 return r; 690 goto release_sema;
588 691
692 end_jiffies = jiffies + HZ / 2; /*~500ms*/
693 message_jiffies = jiffies + HZ / 10; /*~100ms*/
589 while (tmp & 0x2) { 694 while (tmp & 0x2) {
590 r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp); 695 r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
591 if (r < 0) 696 if (r < 0)
592 return r; 697 goto release_sema;
593 if ((++j % 100) == 0) { 698 if (time_is_before_eq_jiffies(message_jiffies)) {
594 printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n"); 699 message_jiffies = jiffies + HZ / 10;
595 if (j >= 500) { 700 dev_err(zd_mac_dev(mac),
596 printk(KERN_ERR "Giving up beacon config.\n"); 701 "CR_BCN_FIFO_SEMAPHORE not ready\n");
597 return -ETIMEDOUT; 702 if (time_is_before_eq_jiffies(end_jiffies)) {
703 dev_err(zd_mac_dev(mac),
704 "Giving up beacon config.\n");
705 r = -ETIMEDOUT;
706 goto reset_device;
598 } 707 }
599 } 708 }
600 msleep(1); 709 msleep(20);
601 } 710 }
602 711
603 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1); 712 ioreqs[req_pos].addr = CR_BCN_FIFO;
604 if (r < 0) 713 ioreqs[req_pos].value = full_len - 1;
605 return r; 714 req_pos++;
606 if (zd_chip_is_zd1211b(&mac->chip)) { 715 if (zd_chip_is_zd1211b(&mac->chip)) {
607 r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1); 716 ioreqs[req_pos].addr = CR_BCN_LENGTH;
608 if (r < 0) 717 ioreqs[req_pos].value = full_len - 1;
609 return r; 718 req_pos++;
610 } 719 }
611 720
612 for (j = 0 ; j < beacon->len; j++) { 721 for (j = 0 ; j < beacon->len; j++) {
613 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 722 ioreqs[req_pos].addr = CR_BCN_FIFO;
614 *((u8 *)(beacon->data + j))); 723 ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
615 if (r < 0) 724 req_pos++;
616 return r;
617 } 725 }
618 726
619 for (j = 0; j < 4; j++) { 727 for (j = 0; j < 4; j++) {
620 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0); 728 ioreqs[req_pos].addr = CR_BCN_FIFO;
621 if (r < 0) 729 ioreqs[req_pos].value = 0x0;
622 return r; 730 req_pos++;
623 } 731 }
624 732
625 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1); 733 BUG_ON(req_pos != num_cmds);
626 if (r < 0) 734
627 return r; 735 r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);
736
737release_sema:
738 /*
739 * Try very hard to release device beacon semaphore, as otherwise
740 * device/driver can be left in unusable state.
741 */
742 end_jiffies = jiffies + HZ / 2; /*~500ms*/
743 ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
744 while (ret < 0) {
745 if (time_is_before_eq_jiffies(end_jiffies)) {
746 ret = -ETIMEDOUT;
747 break;
748 }
749
750 msleep(20);
751 ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
752 }
753
754 if (ret < 0)
755 dev_err(zd_mac_dev(mac), "Could not release "
756 "CR_BCN_FIFO_SEMAPHORE!\n");
757 if (r < 0 || ret < 0) {
758 if (r >= 0)
759 r = ret;
760 goto out;
761 }
628 762
629 /* 802.11b/g 2.4G CCK 1Mb 763 /* 802.11b/g 2.4G CCK 1Mb
630 * 802.11a, not yet implemented, uses different values (see GPL vendor 764 * 802.11a, not yet implemented, uses different values (see GPL vendor
631 * driver) 765 * driver)
632 */ 766 */
633 return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 | 767 r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
634 (full_len << 19)); 768 CR_BCN_PLCP_CFG);
769out:
770 mutex_unlock(&mac->chip.mutex);
771 kfree(ioreqs);
772 return r;
773
774reset_device:
775 mutex_unlock(&mac->chip.mutex);
776 kfree(ioreqs);
777
778 /* semaphore stuck, reset device to avoid fw freeze later */
779 dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
780 "reseting device...");
781 usb_queue_reset_device(mac->chip.usb.intf);
782
783 return r;
635} 784}
636 785
637static int fill_ctrlset(struct zd_mac *mac, 786static int fill_ctrlset(struct zd_mac *mac,
@@ -701,7 +850,7 @@ static int fill_ctrlset(struct zd_mac *mac,
701 * control block of the skbuff will be initialized. If necessary the incoming 850 * control block of the skbuff will be initialized. If necessary the incoming
702 * mac80211 queues will be stopped. 851 * mac80211 queues will be stopped.
703 */ 852 */
704static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 853static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
705{ 854{
706 struct zd_mac *mac = zd_hw_mac(hw); 855 struct zd_mac *mac = zd_hw_mac(hw);
707 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 856 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -716,11 +865,10 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
716 r = zd_usb_tx(&mac->chip.usb, skb); 865 r = zd_usb_tx(&mac->chip.usb, skb);
717 if (r) 866 if (r)
718 goto fail; 867 goto fail;
719 return 0; 868 return;
720 869
721fail: 870fail:
722 dev_kfree_skb(skb); 871 dev_kfree_skb(skb);
723 return 0;
724} 872}
725 873
726/** 874/**
@@ -779,6 +927,13 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
779 927
780 mac->ack_pending = 1; 928 mac->ack_pending = 1;
781 mac->ack_signal = stats->signal; 929 mac->ack_signal = stats->signal;
930
931 /* Prevent pending tx-packet on AP-mode */
932 if (mac->type == NL80211_IFTYPE_AP) {
933 skb = __skb_dequeue(q);
934 zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
935 mac->ack_pending = 0;
936 }
782 } 937 }
783 938
784 spin_unlock_irqrestore(&q->lock, flags); 939 spin_unlock_irqrestore(&q->lock, flags);
@@ -882,13 +1037,16 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
882 case NL80211_IFTYPE_MESH_POINT: 1037 case NL80211_IFTYPE_MESH_POINT:
883 case NL80211_IFTYPE_STATION: 1038 case NL80211_IFTYPE_STATION:
884 case NL80211_IFTYPE_ADHOC: 1039 case NL80211_IFTYPE_ADHOC:
1040 case NL80211_IFTYPE_AP:
885 mac->type = vif->type; 1041 mac->type = vif->type;
886 break; 1042 break;
887 default: 1043 default:
888 return -EOPNOTSUPP; 1044 return -EOPNOTSUPP;
889 } 1045 }
890 1046
891 return zd_write_mac_addr(&mac->chip, vif->addr); 1047 mac->vif = vif;
1048
1049 return set_mac_and_bssid(mac);
892} 1050}
893 1051
894static void zd_op_remove_interface(struct ieee80211_hw *hw, 1052static void zd_op_remove_interface(struct ieee80211_hw *hw,
@@ -896,7 +1054,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
896{ 1054{
897 struct zd_mac *mac = zd_hw_mac(hw); 1055 struct zd_mac *mac = zd_hw_mac(hw);
898 mac->type = NL80211_IFTYPE_UNSPECIFIED; 1056 mac->type = NL80211_IFTYPE_UNSPECIFIED;
899 zd_set_beacon_interval(&mac->chip, 0); 1057 mac->vif = NULL;
1058 zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
900 zd_write_mac_addr(&mac->chip, NULL); 1059 zd_write_mac_addr(&mac->chip, NULL);
901} 1060}
902 1061
@@ -905,49 +1064,67 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
905 struct zd_mac *mac = zd_hw_mac(hw); 1064 struct zd_mac *mac = zd_hw_mac(hw);
906 struct ieee80211_conf *conf = &hw->conf; 1065 struct ieee80211_conf *conf = &hw->conf;
907 1066
1067 spin_lock_irq(&mac->lock);
1068 mac->channel = conf->channel->hw_value;
1069 spin_unlock_irq(&mac->lock);
1070
908 return zd_chip_set_channel(&mac->chip, conf->channel->hw_value); 1071 return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
909} 1072}
910 1073
911static void zd_process_intr(struct work_struct *work) 1074static void zd_beacon_done(struct zd_mac *mac)
912{ 1075{
913 u16 int_status; 1076 struct sk_buff *skb, *beacon;
914 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
915 1077
916 int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4)); 1078 if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
917 if (int_status & INT_CFG_NEXT_BCN) 1079 return;
918 dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n"); 1080 if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
919 else 1081 return;
920 dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
921
922 zd_chip_enable_hwint(&mac->chip);
923}
924 1082
1083 /*
1084 * Send out buffered broad- and multicast frames.
1085 */
1086 while (!ieee80211_queue_stopped(mac->hw, 0)) {
1087 skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
1088 if (!skb)
1089 break;
1090 zd_op_tx(mac->hw, skb);
1091 }
925 1092
926static void set_multicast_hash_handler(struct work_struct *work) 1093 /*
927{ 1094 * Fetch next beacon so that tim_count is updated.
928 struct zd_mac *mac = 1095 */
929 container_of(work, struct zd_mac, set_multicast_hash_work); 1096 beacon = ieee80211_beacon_get(mac->hw, mac->vif);
930 struct zd_mc_hash hash; 1097 if (beacon) {
1098 zd_mac_config_beacon(mac->hw, beacon);
1099 kfree_skb(beacon);
1100 }
931 1101
932 spin_lock_irq(&mac->lock); 1102 spin_lock_irq(&mac->lock);
933 hash = mac->multicast_hash; 1103 mac->beacon.last_update = jiffies;
934 spin_unlock_irq(&mac->lock); 1104 spin_unlock_irq(&mac->lock);
935
936 zd_chip_set_multicast_hash(&mac->chip, &hash);
937} 1105}
938 1106
939static void set_rx_filter_handler(struct work_struct *work) 1107static void zd_process_intr(struct work_struct *work)
940{ 1108{
941 struct zd_mac *mac = 1109 u16 int_status;
942 container_of(work, struct zd_mac, set_rx_filter_work); 1110 unsigned long flags;
943 int r; 1111 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
944 1112
945 dev_dbg_f(zd_mac_dev(mac), "\n"); 1113 spin_lock_irqsave(&mac->lock, flags);
946 r = set_rx_filter(mac); 1114 int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
947 if (r) 1115 spin_unlock_irqrestore(&mac->lock, flags);
948 dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r); 1116
1117 if (int_status & INT_CFG_NEXT_BCN) {
1118 /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
1119 zd_beacon_done(mac);
1120 } else {
1121 dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
1122 }
1123
1124 zd_chip_enable_hwint(&mac->chip);
949} 1125}
950 1126
1127
951static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw, 1128static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
952 struct netdev_hw_addr_list *mc_list) 1129 struct netdev_hw_addr_list *mc_list)
953{ 1130{
@@ -979,6 +1156,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
979 }; 1156 };
980 struct zd_mac *mac = zd_hw_mac(hw); 1157 struct zd_mac *mac = zd_hw_mac(hw);
981 unsigned long flags; 1158 unsigned long flags;
1159 int r;
982 1160
983 /* Only deal with supported flags */ 1161 /* Only deal with supported flags */
984 changed_flags &= SUPPORTED_FIF_FLAGS; 1162 changed_flags &= SUPPORTED_FIF_FLAGS;
@@ -1000,11 +1178,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
1000 mac->multicast_hash = hash; 1178 mac->multicast_hash = hash;
1001 spin_unlock_irqrestore(&mac->lock, flags); 1179 spin_unlock_irqrestore(&mac->lock, flags);
1002 1180
1003 /* XXX: these can be called here now, can sleep now! */ 1181 zd_chip_set_multicast_hash(&mac->chip, &hash);
1004 queue_work(zd_workqueue, &mac->set_multicast_hash_work);
1005 1182
1006 if (changed_flags & FIF_CONTROL) 1183 if (changed_flags & FIF_CONTROL) {
1007 queue_work(zd_workqueue, &mac->set_rx_filter_work); 1184 r = set_rx_filter(mac);
1185 if (r)
1186 dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
1187 }
1008 1188
1009 /* no handling required for FIF_OTHER_BSS as we don't currently 1189 /* no handling required for FIF_OTHER_BSS as we don't currently
1010 * do BSSID filtering */ 1190 * do BSSID filtering */
@@ -1016,20 +1196,9 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
1016 * time. */ 1196 * time. */
1017} 1197}
1018 1198
1019static void set_rts_cts_work(struct work_struct *work) 1199static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
1020{ 1200{
1021 struct zd_mac *mac =
1022 container_of(work, struct zd_mac, set_rts_cts_work);
1023 unsigned long flags;
1024 unsigned int short_preamble;
1025
1026 mutex_lock(&mac->chip.mutex); 1201 mutex_lock(&mac->chip.mutex);
1027
1028 spin_lock_irqsave(&mac->lock, flags);
1029 mac->updating_rts_rate = 0;
1030 short_preamble = mac->short_preamble;
1031 spin_unlock_irqrestore(&mac->lock, flags);
1032
1033 zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble); 1202 zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
1034 mutex_unlock(&mac->chip.mutex); 1203 mutex_unlock(&mac->chip.mutex);
1035} 1204}
@@ -1040,33 +1209,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
1040 u32 changes) 1209 u32 changes)
1041{ 1210{
1042 struct zd_mac *mac = zd_hw_mac(hw); 1211 struct zd_mac *mac = zd_hw_mac(hw);
1043 unsigned long flags;
1044 int associated; 1212 int associated;
1045 1213
1046 dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes); 1214 dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
1047 1215
1048 if (mac->type == NL80211_IFTYPE_MESH_POINT || 1216 if (mac->type == NL80211_IFTYPE_MESH_POINT ||
1049 mac->type == NL80211_IFTYPE_ADHOC) { 1217 mac->type == NL80211_IFTYPE_ADHOC ||
1218 mac->type == NL80211_IFTYPE_AP) {
1050 associated = true; 1219 associated = true;
1051 if (changes & BSS_CHANGED_BEACON) { 1220 if (changes & BSS_CHANGED_BEACON) {
1052 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 1221 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1053 1222
1054 if (beacon) { 1223 if (beacon) {
1224 zd_chip_disable_hwint(&mac->chip);
1055 zd_mac_config_beacon(hw, beacon); 1225 zd_mac_config_beacon(hw, beacon);
1226 zd_chip_enable_hwint(&mac->chip);
1056 kfree_skb(beacon); 1227 kfree_skb(beacon);
1057 } 1228 }
1058 } 1229 }
1059 1230
1060 if (changes & BSS_CHANGED_BEACON_ENABLED) { 1231 if (changes & BSS_CHANGED_BEACON_ENABLED) {
1061 u32 interval; 1232 u16 interval = 0;
1233 u8 period = 0;
1062 1234
1063 if (bss_conf->enable_beacon) 1235 if (bss_conf->enable_beacon) {
1064 interval = BCN_MODE_IBSS | 1236 period = bss_conf->dtim_period;
1065 bss_conf->beacon_int; 1237 interval = bss_conf->beacon_int;
1066 else 1238 }
1067 interval = 0;
1068 1239
1069 zd_set_beacon_interval(&mac->chip, interval); 1240 spin_lock_irq(&mac->lock);
1241 mac->beacon.period = period;
1242 mac->beacon.interval = interval;
1243 mac->beacon.last_update = jiffies;
1244 spin_unlock_irq(&mac->lock);
1245
1246 zd_set_beacon_interval(&mac->chip, interval, period,
1247 mac->type);
1070 } 1248 }
1071 } else 1249 } else
1072 associated = is_valid_ether_addr(bss_conf->bssid); 1250 associated = is_valid_ether_addr(bss_conf->bssid);
@@ -1078,15 +1256,11 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
1078 /* TODO: do hardware bssid filtering */ 1256 /* TODO: do hardware bssid filtering */
1079 1257
1080 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 1258 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
1081 spin_lock_irqsave(&mac->lock, flags); 1259 spin_lock_irq(&mac->lock);
1082 mac->short_preamble = bss_conf->use_short_preamble; 1260 mac->short_preamble = bss_conf->use_short_preamble;
1083 if (!mac->updating_rts_rate) { 1261 spin_unlock_irq(&mac->lock);
1084 mac->updating_rts_rate = 1; 1262
1085 /* FIXME: should disable TX here, until work has 1263 set_rts_cts(mac, bss_conf->use_short_preamble);
1086 * completed and RTS_CTS reg is updated */
1087 queue_work(zd_workqueue, &mac->set_rts_cts_work);
1088 }
1089 spin_unlock_irqrestore(&mac->lock, flags);
1090 } 1264 }
1091} 1265}
1092 1266
@@ -1138,12 +1312,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
1138 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band; 1312 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
1139 1313
1140 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 1314 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1141 IEEE80211_HW_SIGNAL_UNSPEC; 1315 IEEE80211_HW_SIGNAL_UNSPEC |
1316 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
1142 1317
1143 hw->wiphy->interface_modes = 1318 hw->wiphy->interface_modes =
1144 BIT(NL80211_IFTYPE_MESH_POINT) | 1319 BIT(NL80211_IFTYPE_MESH_POINT) |
1145 BIT(NL80211_IFTYPE_STATION) | 1320 BIT(NL80211_IFTYPE_STATION) |
1146 BIT(NL80211_IFTYPE_ADHOC); 1321 BIT(NL80211_IFTYPE_ADHOC) |
1322 BIT(NL80211_IFTYPE_AP);
1147 1323
1148 hw->max_signal = 100; 1324 hw->max_signal = 100;
1149 hw->queues = 1; 1325 hw->queues = 1;
@@ -1160,15 +1336,82 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
1160 1336
1161 zd_chip_init(&mac->chip, hw, intf); 1337 zd_chip_init(&mac->chip, hw, intf);
1162 housekeeping_init(mac); 1338 housekeeping_init(mac);
1163 INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler); 1339 beacon_init(mac);
1164 INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
1165 INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler);
1166 INIT_WORK(&mac->process_intr, zd_process_intr); 1340 INIT_WORK(&mac->process_intr, zd_process_intr);
1167 1341
1168 SET_IEEE80211_DEV(hw, &intf->dev); 1342 SET_IEEE80211_DEV(hw, &intf->dev);
1169 return hw; 1343 return hw;
1170} 1344}
1171 1345
1346#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)
1347
1348static void beacon_watchdog_handler(struct work_struct *work)
1349{
1350 struct zd_mac *mac =
1351 container_of(work, struct zd_mac, beacon.watchdog_work.work);
1352 struct sk_buff *beacon;
1353 unsigned long timeout;
1354 int interval, period;
1355
1356 if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
1357 goto rearm;
1358 if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
1359 goto rearm;
1360
1361 spin_lock_irq(&mac->lock);
1362 interval = mac->beacon.interval;
1363 period = mac->beacon.period;
1364 timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
1365 spin_unlock_irq(&mac->lock);
1366
1367 if (interval > 0 && time_is_before_jiffies(timeout)) {
1368 dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
1369 "restarting. "
1370 "(interval: %d, dtim: %d)\n",
1371 interval, period);
1372
1373 zd_chip_disable_hwint(&mac->chip);
1374
1375 beacon = ieee80211_beacon_get(mac->hw, mac->vif);
1376 if (beacon) {
1377 zd_mac_config_beacon(mac->hw, beacon);
1378 kfree_skb(beacon);
1379 }
1380
1381 zd_set_beacon_interval(&mac->chip, interval, period, mac->type);
1382
1383 zd_chip_enable_hwint(&mac->chip);
1384
1385 spin_lock_irq(&mac->lock);
1386 mac->beacon.last_update = jiffies;
1387 spin_unlock_irq(&mac->lock);
1388 }
1389
1390rearm:
1391 queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
1392 BEACON_WATCHDOG_DELAY);
1393}
1394
1395static void beacon_init(struct zd_mac *mac)
1396{
1397 INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
1398}
1399
1400static void beacon_enable(struct zd_mac *mac)
1401{
1402 dev_dbg_f(zd_mac_dev(mac), "\n");
1403
1404 mac->beacon.last_update = jiffies;
1405 queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
1406 BEACON_WATCHDOG_DELAY);
1407}
1408
1409static void beacon_disable(struct zd_mac *mac)
1410{
1411 dev_dbg_f(zd_mac_dev(mac), "\n");
1412 cancel_delayed_work_sync(&mac->beacon.watchdog_work);
1413}
1414
1172#define LINK_LED_WORK_DELAY HZ 1415#define LINK_LED_WORK_DELAY HZ
1173 1416
1174static void link_led_handler(struct work_struct *work) 1417static void link_led_handler(struct work_struct *work)
@@ -1179,6 +1422,9 @@ static void link_led_handler(struct work_struct *work)
1179 int is_associated; 1422 int is_associated;
1180 int r; 1423 int r;
1181 1424
1425 if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
1426 goto requeue;
1427
1182 spin_lock_irq(&mac->lock); 1428 spin_lock_irq(&mac->lock);
1183 is_associated = mac->associated; 1429 is_associated = mac->associated;
1184 spin_unlock_irq(&mac->lock); 1430 spin_unlock_irq(&mac->lock);
@@ -1188,6 +1434,7 @@ static void link_led_handler(struct work_struct *work)
1188 if (r) 1434 if (r)
1189 dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r); 1435 dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);
1190 1436
1437requeue:
1191 queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work, 1438 queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
1192 LINK_LED_WORK_DELAY); 1439 LINK_LED_WORK_DELAY);
1193} 1440}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index a6d86b996c79..f8c93c3fe755 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -163,6 +163,17 @@ struct housekeeping {
163 struct delayed_work link_led_work; 163 struct delayed_work link_led_work;
164}; 164};
165 165
166struct beacon {
167 struct delayed_work watchdog_work;
168 unsigned long last_update;
169 u16 interval;
170 u8 period;
171};
172
173enum zd_device_flags {
174 ZD_DEVICE_RUNNING,
175};
176
166#define ZD_MAC_STATS_BUFFER_SIZE 16 177#define ZD_MAC_STATS_BUFFER_SIZE 16
167 178
168#define ZD_MAC_MAX_ACK_WAITERS 50 179#define ZD_MAC_MAX_ACK_WAITERS 50
@@ -172,17 +183,19 @@ struct zd_mac {
172 spinlock_t lock; 183 spinlock_t lock;
173 spinlock_t intr_lock; 184 spinlock_t intr_lock;
174 struct ieee80211_hw *hw; 185 struct ieee80211_hw *hw;
186 struct ieee80211_vif *vif;
175 struct housekeeping housekeeping; 187 struct housekeeping housekeeping;
176 struct work_struct set_multicast_hash_work; 188 struct beacon beacon;
177 struct work_struct set_rts_cts_work; 189 struct work_struct set_rts_cts_work;
178 struct work_struct set_rx_filter_work;
179 struct work_struct process_intr; 190 struct work_struct process_intr;
180 struct zd_mc_hash multicast_hash; 191 struct zd_mc_hash multicast_hash;
181 u8 intr_buffer[USB_MAX_EP_INT_BUFFER]; 192 u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
182 u8 regdomain; 193 u8 regdomain;
183 u8 default_regdomain; 194 u8 default_regdomain;
195 u8 channel;
184 int type; 196 int type;
185 int associated; 197 int associated;
198 unsigned long flags;
186 struct sk_buff_head ack_wait_queue; 199 struct sk_buff_head ack_wait_queue;
187 struct ieee80211_channel channels[14]; 200 struct ieee80211_channel channels[14];
188 struct ieee80211_rate rates[12]; 201 struct ieee80211_rate rates[12];
@@ -191,9 +204,6 @@ struct zd_mac {
191 /* Short preamble (used for RTS/CTS) */ 204 /* Short preamble (used for RTS/CTS) */
192 unsigned int short_preamble:1; 205 unsigned int short_preamble:1;
193 206
194 /* flags to indicate update in progress */
195 unsigned int updating_rts_rate:1;
196
197 /* whether to pass frames with CRC errors to stack */ 207 /* whether to pass frames with CRC errors to stack */
198 unsigned int pass_failed_fcs:1; 208 unsigned int pass_failed_fcs:1;
199 209
@@ -304,6 +314,10 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
304void zd_mac_tx_failed(struct urb *urb); 314void zd_mac_tx_failed(struct urb *urb);
305void zd_mac_tx_to_dev(struct sk_buff *skb, int error); 315void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
306 316
317int zd_op_start(struct ieee80211_hw *hw);
318void zd_op_stop(struct ieee80211_hw *hw);
319int zd_restore_settings(struct zd_mac *mac);
320
307#ifdef DEBUG 321#ifdef DEBUG
308void zd_dump_rx_status(const struct rx_status *status); 322void zd_dump_rx_status(const struct rx_status *status);
309#else 323#else
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 06041cb1c422..81e80489a052 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -377,8 +377,10 @@ static inline void handle_regs_int(struct urb *urb)
377 int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2)); 377 int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
378 if (int_num == CR_INTERRUPT) { 378 if (int_num == CR_INTERRUPT) {
379 struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context)); 379 struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
380 spin_lock(&mac->lock);
380 memcpy(&mac->intr_buffer, urb->transfer_buffer, 381 memcpy(&mac->intr_buffer, urb->transfer_buffer,
381 USB_MAX_EP_INT_BUFFER); 382 USB_MAX_EP_INT_BUFFER);
383 spin_unlock(&mac->lock);
382 schedule_work(&mac->process_intr); 384 schedule_work(&mac->process_intr);
383 } else if (intr->read_regs_enabled) { 385 } else if (intr->read_regs_enabled) {
384 intr->read_regs.length = len = urb->actual_length; 386 intr->read_regs.length = len = urb->actual_length;
@@ -409,8 +411,10 @@ static void int_urb_complete(struct urb *urb)
409 case -ENOENT: 411 case -ENOENT:
410 case -ECONNRESET: 412 case -ECONNRESET:
411 case -EPIPE: 413 case -EPIPE:
412 goto kfree; 414 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
415 return;
413 default: 416 default:
417 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
414 goto resubmit; 418 goto resubmit;
415 } 419 }
416 420
@@ -441,12 +445,11 @@ static void int_urb_complete(struct urb *urb)
441resubmit: 445resubmit:
442 r = usb_submit_urb(urb, GFP_ATOMIC); 446 r = usb_submit_urb(urb, GFP_ATOMIC);
443 if (r) { 447 if (r) {
444 dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb); 448 dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n",
445 goto kfree; 449 urb, r);
450 /* TODO: add worker to reset intr->urb */
446 } 451 }
447 return; 452 return;
448kfree:
449 kfree(urb->transfer_buffer);
450} 453}
451 454
452static inline int int_urb_interval(struct usb_device *udev) 455static inline int int_urb_interval(struct usb_device *udev)
@@ -477,9 +480,8 @@ static inline int usb_int_enabled(struct zd_usb *usb)
477int zd_usb_enable_int(struct zd_usb *usb) 480int zd_usb_enable_int(struct zd_usb *usb)
478{ 481{
479 int r; 482 int r;
480 struct usb_device *udev; 483 struct usb_device *udev = zd_usb_to_usbdev(usb);
481 struct zd_usb_interrupt *intr = &usb->intr; 484 struct zd_usb_interrupt *intr = &usb->intr;
482 void *transfer_buffer = NULL;
483 struct urb *urb; 485 struct urb *urb;
484 486
485 dev_dbg_f(zd_usb_dev(usb), "\n"); 487 dev_dbg_f(zd_usb_dev(usb), "\n");
@@ -500,20 +502,21 @@ int zd_usb_enable_int(struct zd_usb *usb)
500 intr->urb = urb; 502 intr->urb = urb;
501 spin_unlock_irq(&intr->lock); 503 spin_unlock_irq(&intr->lock);
502 504
503 /* TODO: make it a DMA buffer */
504 r = -ENOMEM; 505 r = -ENOMEM;
505 transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL); 506 intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
506 if (!transfer_buffer) { 507 GFP_KERNEL, &intr->buffer_dma);
508 if (!intr->buffer) {
507 dev_dbg_f(zd_usb_dev(usb), 509 dev_dbg_f(zd_usb_dev(usb),
508 "couldn't allocate transfer_buffer\n"); 510 "couldn't allocate transfer_buffer\n");
509 goto error_set_urb_null; 511 goto error_set_urb_null;
510 } 512 }
511 513
512 udev = zd_usb_to_usbdev(usb);
513 usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN), 514 usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
514 transfer_buffer, USB_MAX_EP_INT_BUFFER, 515 intr->buffer, USB_MAX_EP_INT_BUFFER,
515 int_urb_complete, usb, 516 int_urb_complete, usb,
516 intr->interval); 517 intr->interval);
518 urb->transfer_dma = intr->buffer_dma;
519 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
517 520
518 dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb); 521 dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
519 r = usb_submit_urb(urb, GFP_KERNEL); 522 r = usb_submit_urb(urb, GFP_KERNEL);
@@ -525,7 +528,8 @@ int zd_usb_enable_int(struct zd_usb *usb)
525 528
526 return 0; 529 return 0;
527error: 530error:
528 kfree(transfer_buffer); 531 usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
532 intr->buffer, intr->buffer_dma);
529error_set_urb_null: 533error_set_urb_null:
530 spin_lock_irq(&intr->lock); 534 spin_lock_irq(&intr->lock);
531 intr->urb = NULL; 535 intr->urb = NULL;
@@ -539,8 +543,11 @@ out:
539void zd_usb_disable_int(struct zd_usb *usb) 543void zd_usb_disable_int(struct zd_usb *usb)
540{ 544{
541 unsigned long flags; 545 unsigned long flags;
546 struct usb_device *udev = zd_usb_to_usbdev(usb);
542 struct zd_usb_interrupt *intr = &usb->intr; 547 struct zd_usb_interrupt *intr = &usb->intr;
543 struct urb *urb; 548 struct urb *urb;
549 void *buffer;
550 dma_addr_t buffer_dma;
544 551
545 spin_lock_irqsave(&intr->lock, flags); 552 spin_lock_irqsave(&intr->lock, flags);
546 urb = intr->urb; 553 urb = intr->urb;
@@ -549,11 +556,18 @@ void zd_usb_disable_int(struct zd_usb *usb)
549 return; 556 return;
550 } 557 }
551 intr->urb = NULL; 558 intr->urb = NULL;
559 buffer = intr->buffer;
560 buffer_dma = intr->buffer_dma;
561 intr->buffer = NULL;
552 spin_unlock_irqrestore(&intr->lock, flags); 562 spin_unlock_irqrestore(&intr->lock, flags);
553 563
554 usb_kill_urb(urb); 564 usb_kill_urb(urb);
555 dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb); 565 dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
556 usb_free_urb(urb); 566 usb_free_urb(urb);
567
568 if (buffer)
569 usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
570 buffer, buffer_dma);
557} 571}
558 572
559static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, 573static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
@@ -601,6 +615,7 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
601 615
602static void rx_urb_complete(struct urb *urb) 616static void rx_urb_complete(struct urb *urb)
603{ 617{
618 int r;
604 struct zd_usb *usb; 619 struct zd_usb *usb;
605 struct zd_usb_rx *rx; 620 struct zd_usb_rx *rx;
606 const u8 *buffer; 621 const u8 *buffer;
@@ -615,6 +630,7 @@ static void rx_urb_complete(struct urb *urb)
615 case -ENOENT: 630 case -ENOENT:
616 case -ECONNRESET: 631 case -ECONNRESET:
617 case -EPIPE: 632 case -EPIPE:
633 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
618 return; 634 return;
619 default: 635 default:
620 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); 636 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
@@ -626,6 +642,8 @@ static void rx_urb_complete(struct urb *urb)
626 usb = urb->context; 642 usb = urb->context;
627 rx = &usb->rx; 643 rx = &usb->rx;
628 644
645 zd_usb_reset_rx_idle_timer(usb);
646
629 if (length%rx->usb_packet_size > rx->usb_packet_size-4) { 647 if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
630 /* If there is an old first fragment, we don't care. */ 648 /* If there is an old first fragment, we don't care. */
631 dev_dbg_f(urb_dev(urb), "*** first fragment ***\n"); 649 dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
@@ -654,7 +672,9 @@ static void rx_urb_complete(struct urb *urb)
654 } 672 }
655 673
656resubmit: 674resubmit:
657 usb_submit_urb(urb, GFP_ATOMIC); 675 r = usb_submit_urb(urb, GFP_ATOMIC);
676 if (r)
677 dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
658} 678}
659 679
660static struct urb *alloc_rx_urb(struct zd_usb *usb) 680static struct urb *alloc_rx_urb(struct zd_usb *usb)
@@ -690,7 +710,7 @@ static void free_rx_urb(struct urb *urb)
690 usb_free_urb(urb); 710 usb_free_urb(urb);
691} 711}
692 712
693int zd_usb_enable_rx(struct zd_usb *usb) 713static int __zd_usb_enable_rx(struct zd_usb *usb)
694{ 714{
695 int i, r; 715 int i, r;
696 struct zd_usb_rx *rx = &usb->rx; 716 struct zd_usb_rx *rx = &usb->rx;
@@ -742,7 +762,21 @@ error:
742 return r; 762 return r;
743} 763}
744 764
745void zd_usb_disable_rx(struct zd_usb *usb) 765int zd_usb_enable_rx(struct zd_usb *usb)
766{
767 int r;
768 struct zd_usb_rx *rx = &usb->rx;
769
770 mutex_lock(&rx->setup_mutex);
771 r = __zd_usb_enable_rx(usb);
772 mutex_unlock(&rx->setup_mutex);
773
774 zd_usb_reset_rx_idle_timer(usb);
775
776 return r;
777}
778
779static void __zd_usb_disable_rx(struct zd_usb *usb)
746{ 780{
747 int i; 781 int i;
748 unsigned long flags; 782 unsigned long flags;
@@ -769,6 +803,40 @@ void zd_usb_disable_rx(struct zd_usb *usb)
769 spin_unlock_irqrestore(&rx->lock, flags); 803 spin_unlock_irqrestore(&rx->lock, flags);
770} 804}
771 805
806void zd_usb_disable_rx(struct zd_usb *usb)
807{
808 struct zd_usb_rx *rx = &usb->rx;
809
810 mutex_lock(&rx->setup_mutex);
811 __zd_usb_disable_rx(usb);
812 mutex_unlock(&rx->setup_mutex);
813
814 cancel_delayed_work_sync(&rx->idle_work);
815}
816
817static void zd_usb_reset_rx(struct zd_usb *usb)
818{
819 bool do_reset;
820 struct zd_usb_rx *rx = &usb->rx;
821 unsigned long flags;
822
823 mutex_lock(&rx->setup_mutex);
824
825 spin_lock_irqsave(&rx->lock, flags);
826 do_reset = rx->urbs != NULL;
827 spin_unlock_irqrestore(&rx->lock, flags);
828
829 if (do_reset) {
830 __zd_usb_disable_rx(usb);
831 __zd_usb_enable_rx(usb);
832 }
833
834 mutex_unlock(&rx->setup_mutex);
835
836 if (do_reset)
837 zd_usb_reset_rx_idle_timer(usb);
838}
839
772/** 840/**
773 * zd_usb_disable_tx - disable transmission 841 * zd_usb_disable_tx - disable transmission
774 * @usb: the zd1211rw-private USB structure 842 * @usb: the zd1211rw-private USB structure
@@ -779,19 +847,21 @@ void zd_usb_disable_tx(struct zd_usb *usb)
779{ 847{
780 struct zd_usb_tx *tx = &usb->tx; 848 struct zd_usb_tx *tx = &usb->tx;
781 unsigned long flags; 849 unsigned long flags;
782 struct list_head *pos, *n; 850
851 atomic_set(&tx->enabled, 0);
852
853 /* kill all submitted tx-urbs */
854 usb_kill_anchored_urbs(&tx->submitted);
783 855
784 spin_lock_irqsave(&tx->lock, flags); 856 spin_lock_irqsave(&tx->lock, flags);
785 list_for_each_safe(pos, n, &tx->free_urb_list) { 857 WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
786 list_del(pos); 858 WARN_ON(tx->submitted_urbs != 0);
787 usb_free_urb(list_entry(pos, struct urb, urb_list));
788 }
789 tx->enabled = 0;
790 tx->submitted_urbs = 0; 859 tx->submitted_urbs = 0;
860 spin_unlock_irqrestore(&tx->lock, flags);
861
791 /* The stopped state is ignored, relying on ieee80211_wake_queues() 862 /* The stopped state is ignored, relying on ieee80211_wake_queues()
792 * in a potentionally following zd_usb_enable_tx(). 863 * in a potentionally following zd_usb_enable_tx().
793 */ 864 */
794 spin_unlock_irqrestore(&tx->lock, flags);
795} 865}
796 866
797/** 867/**
@@ -807,63 +877,13 @@ void zd_usb_enable_tx(struct zd_usb *usb)
807 struct zd_usb_tx *tx = &usb->tx; 877 struct zd_usb_tx *tx = &usb->tx;
808 878
809 spin_lock_irqsave(&tx->lock, flags); 879 spin_lock_irqsave(&tx->lock, flags);
810 tx->enabled = 1; 880 atomic_set(&tx->enabled, 1);
811 tx->submitted_urbs = 0; 881 tx->submitted_urbs = 0;
812 ieee80211_wake_queues(zd_usb_to_hw(usb)); 882 ieee80211_wake_queues(zd_usb_to_hw(usb));
813 tx->stopped = 0; 883 tx->stopped = 0;
814 spin_unlock_irqrestore(&tx->lock, flags); 884 spin_unlock_irqrestore(&tx->lock, flags);
815} 885}
816 886
817/**
818 * alloc_tx_urb - provides an tx URB
819 * @usb: a &struct zd_usb pointer
820 *
821 * Allocates a new URB. If possible takes the urb from the free list in
822 * usb->tx.
823 */
824static struct urb *alloc_tx_urb(struct zd_usb *usb)
825{
826 struct zd_usb_tx *tx = &usb->tx;
827 unsigned long flags;
828 struct list_head *entry;
829 struct urb *urb;
830
831 spin_lock_irqsave(&tx->lock, flags);
832 if (list_empty(&tx->free_urb_list)) {
833 urb = usb_alloc_urb(0, GFP_ATOMIC);
834 goto out;
835 }
836 entry = tx->free_urb_list.next;
837 list_del(entry);
838 urb = list_entry(entry, struct urb, urb_list);
839out:
840 spin_unlock_irqrestore(&tx->lock, flags);
841 return urb;
842}
843
844/**
845 * free_tx_urb - frees a used tx URB
846 * @usb: a &struct zd_usb pointer
847 * @urb: URB to be freed
848 *
849 * Frees the transmission URB, which means to put it on the free URB
850 * list.
851 */
852static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
853{
854 struct zd_usb_tx *tx = &usb->tx;
855 unsigned long flags;
856
857 spin_lock_irqsave(&tx->lock, flags);
858 if (!tx->enabled) {
859 usb_free_urb(urb);
860 goto out;
861 }
862 list_add(&urb->urb_list, &tx->free_urb_list);
863out:
864 spin_unlock_irqrestore(&tx->lock, flags);
865}
866
867static void tx_dec_submitted_urbs(struct zd_usb *usb) 887static void tx_dec_submitted_urbs(struct zd_usb *usb)
868{ 888{
869 struct zd_usb_tx *tx = &usb->tx; 889 struct zd_usb_tx *tx = &usb->tx;
@@ -905,6 +925,16 @@ static void tx_urb_complete(struct urb *urb)
905 struct sk_buff *skb; 925 struct sk_buff *skb;
906 struct ieee80211_tx_info *info; 926 struct ieee80211_tx_info *info;
907 struct zd_usb *usb; 927 struct zd_usb *usb;
928 struct zd_usb_tx *tx;
929
930 skb = (struct sk_buff *)urb->context;
931 info = IEEE80211_SKB_CB(skb);
932 /*
933 * grab 'usb' pointer before handing off the skb (since
934 * it might be freed by zd_mac_tx_to_dev or mac80211)
935 */
936 usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
937 tx = &usb->tx;
908 938
909 switch (urb->status) { 939 switch (urb->status) {
910 case 0: 940 case 0:
@@ -922,20 +952,16 @@ static void tx_urb_complete(struct urb *urb)
922 goto resubmit; 952 goto resubmit;
923 } 953 }
924free_urb: 954free_urb:
925 skb = (struct sk_buff *)urb->context; 955 skb_unlink(skb, &usb->tx.submitted_skbs);
926 /*
927 * grab 'usb' pointer before handing off the skb (since
928 * it might be freed by zd_mac_tx_to_dev or mac80211)
929 */
930 info = IEEE80211_SKB_CB(skb);
931 usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
932 zd_mac_tx_to_dev(skb, urb->status); 956 zd_mac_tx_to_dev(skb, urb->status);
933 free_tx_urb(usb, urb); 957 usb_free_urb(urb);
934 tx_dec_submitted_urbs(usb); 958 tx_dec_submitted_urbs(usb);
935 return; 959 return;
936resubmit: 960resubmit:
961 usb_anchor_urb(urb, &tx->submitted);
937 r = usb_submit_urb(urb, GFP_ATOMIC); 962 r = usb_submit_urb(urb, GFP_ATOMIC);
938 if (r) { 963 if (r) {
964 usb_unanchor_urb(urb);
939 dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r); 965 dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
940 goto free_urb; 966 goto free_urb;
941 } 967 }
@@ -956,10 +982,17 @@ resubmit:
956int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb) 982int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
957{ 983{
958 int r; 984 int r;
985 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
959 struct usb_device *udev = zd_usb_to_usbdev(usb); 986 struct usb_device *udev = zd_usb_to_usbdev(usb);
960 struct urb *urb; 987 struct urb *urb;
988 struct zd_usb_tx *tx = &usb->tx;
989
990 if (!atomic_read(&tx->enabled)) {
991 r = -ENOENT;
992 goto out;
993 }
961 994
962 urb = alloc_tx_urb(usb); 995 urb = usb_alloc_urb(0, GFP_ATOMIC);
963 if (!urb) { 996 if (!urb) {
964 r = -ENOMEM; 997 r = -ENOMEM;
965 goto out; 998 goto out;
@@ -968,17 +1001,118 @@ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
968 usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), 1001 usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
969 skb->data, skb->len, tx_urb_complete, skb); 1002 skb->data, skb->len, tx_urb_complete, skb);
970 1003
1004 info->rate_driver_data[1] = (void *)jiffies;
1005 skb_queue_tail(&tx->submitted_skbs, skb);
1006 usb_anchor_urb(urb, &tx->submitted);
1007
971 r = usb_submit_urb(urb, GFP_ATOMIC); 1008 r = usb_submit_urb(urb, GFP_ATOMIC);
972 if (r) 1009 if (r) {
1010 dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
1011 usb_unanchor_urb(urb);
1012 skb_unlink(skb, &tx->submitted_skbs);
973 goto error; 1013 goto error;
1014 }
974 tx_inc_submitted_urbs(usb); 1015 tx_inc_submitted_urbs(usb);
975 return 0; 1016 return 0;
976error: 1017error:
977 free_tx_urb(usb, urb); 1018 usb_free_urb(urb);
978out: 1019out:
979 return r; 1020 return r;
980} 1021}
981 1022
1023static bool zd_tx_timeout(struct zd_usb *usb)
1024{
1025 struct zd_usb_tx *tx = &usb->tx;
1026 struct sk_buff_head *q = &tx->submitted_skbs;
1027 struct sk_buff *skb, *skbnext;
1028 struct ieee80211_tx_info *info;
1029 unsigned long flags, trans_start;
1030 bool have_timedout = false;
1031
1032 spin_lock_irqsave(&q->lock, flags);
1033 skb_queue_walk_safe(q, skb, skbnext) {
1034 info = IEEE80211_SKB_CB(skb);
1035 trans_start = (unsigned long)info->rate_driver_data[1];
1036
1037 if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
1038 have_timedout = true;
1039 break;
1040 }
1041 }
1042 spin_unlock_irqrestore(&q->lock, flags);
1043
1044 return have_timedout;
1045}
1046
1047static void zd_tx_watchdog_handler(struct work_struct *work)
1048{
1049 struct zd_usb *usb =
1050 container_of(work, struct zd_usb, tx.watchdog_work.work);
1051 struct zd_usb_tx *tx = &usb->tx;
1052
1053 if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled)
1054 goto out;
1055 if (!zd_tx_timeout(usb))
1056 goto out;
1057
1058 /* TX halted, try reset */
1059 dev_warn(zd_usb_dev(usb), "TX-stall detected, reseting device...");
1060
1061 usb_queue_reset_device(usb->intf);
1062
1063 /* reset will stop this worker, don't rearm */
1064 return;
1065out:
1066 queue_delayed_work(zd_workqueue, &tx->watchdog_work,
1067 ZD_TX_WATCHDOG_INTERVAL);
1068}
1069
1070void zd_tx_watchdog_enable(struct zd_usb *usb)
1071{
1072 struct zd_usb_tx *tx = &usb->tx;
1073
1074 if (!tx->watchdog_enabled) {
1075 dev_dbg_f(zd_usb_dev(usb), "\n");
1076 queue_delayed_work(zd_workqueue, &tx->watchdog_work,
1077 ZD_TX_WATCHDOG_INTERVAL);
1078 tx->watchdog_enabled = 1;
1079 }
1080}
1081
1082void zd_tx_watchdog_disable(struct zd_usb *usb)
1083{
1084 struct zd_usb_tx *tx = &usb->tx;
1085
1086 if (tx->watchdog_enabled) {
1087 dev_dbg_f(zd_usb_dev(usb), "\n");
1088 tx->watchdog_enabled = 0;
1089 cancel_delayed_work_sync(&tx->watchdog_work);
1090 }
1091}
1092
1093static void zd_rx_idle_timer_handler(struct work_struct *work)
1094{
1095 struct zd_usb *usb =
1096 container_of(work, struct zd_usb, rx.idle_work.work);
1097 struct zd_mac *mac = zd_usb_to_mac(usb);
1098
1099 if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
1100 return;
1101
1102 dev_dbg_f(zd_usb_dev(usb), "\n");
1103
1104 /* 30 seconds since last rx, reset rx */
1105 zd_usb_reset_rx(usb);
1106}
1107
1108void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
1109{
1110 struct zd_usb_rx *rx = &usb->rx;
1111
1112 cancel_delayed_work(&rx->idle_work);
1113 queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
1114}
1115
982static inline void init_usb_interrupt(struct zd_usb *usb) 1116static inline void init_usb_interrupt(struct zd_usb *usb)
983{ 1117{
984 struct zd_usb_interrupt *intr = &usb->intr; 1118 struct zd_usb_interrupt *intr = &usb->intr;
@@ -993,22 +1127,27 @@ static inline void init_usb_rx(struct zd_usb *usb)
993{ 1127{
994 struct zd_usb_rx *rx = &usb->rx; 1128 struct zd_usb_rx *rx = &usb->rx;
995 spin_lock_init(&rx->lock); 1129 spin_lock_init(&rx->lock);
1130 mutex_init(&rx->setup_mutex);
996 if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) { 1131 if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
997 rx->usb_packet_size = 512; 1132 rx->usb_packet_size = 512;
998 } else { 1133 } else {
999 rx->usb_packet_size = 64; 1134 rx->usb_packet_size = 64;
1000 } 1135 }
1001 ZD_ASSERT(rx->fragment_length == 0); 1136 ZD_ASSERT(rx->fragment_length == 0);
1137 INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
1002} 1138}
1003 1139
1004static inline void init_usb_tx(struct zd_usb *usb) 1140static inline void init_usb_tx(struct zd_usb *usb)
1005{ 1141{
1006 struct zd_usb_tx *tx = &usb->tx; 1142 struct zd_usb_tx *tx = &usb->tx;
1007 spin_lock_init(&tx->lock); 1143 spin_lock_init(&tx->lock);
1008 tx->enabled = 0; 1144 atomic_set(&tx->enabled, 0);
1009 tx->stopped = 0; 1145 tx->stopped = 0;
1010 INIT_LIST_HEAD(&tx->free_urb_list); 1146 skb_queue_head_init(&tx->submitted_skbs);
1147 init_usb_anchor(&tx->submitted);
1011 tx->submitted_urbs = 0; 1148 tx->submitted_urbs = 0;
1149 tx->watchdog_enabled = 0;
1150 INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler);
1012} 1151}
1013 1152
1014void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw, 1153void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
@@ -1017,6 +1156,7 @@ void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
1017 memset(usb, 0, sizeof(*usb)); 1156 memset(usb, 0, sizeof(*usb));
1018 usb->intf = usb_get_intf(intf); 1157 usb->intf = usb_get_intf(intf);
1019 usb_set_intfdata(usb->intf, hw); 1158 usb_set_intfdata(usb->intf, hw);
1159 init_usb_anchor(&usb->submitted_cmds);
1020 init_usb_interrupt(usb); 1160 init_usb_interrupt(usb);
1021 init_usb_tx(usb); 1161 init_usb_tx(usb);
1022 init_usb_rx(usb); 1162 init_usb_rx(usb);
@@ -1240,6 +1380,7 @@ static void disconnect(struct usb_interface *intf)
1240 ieee80211_unregister_hw(hw); 1380 ieee80211_unregister_hw(hw);
1241 1381
1242 /* Just in case something has gone wrong! */ 1382 /* Just in case something has gone wrong! */
1383 zd_usb_disable_tx(usb);
1243 zd_usb_disable_rx(usb); 1384 zd_usb_disable_rx(usb);
1244 zd_usb_disable_int(usb); 1385 zd_usb_disable_int(usb);
1245 1386
@@ -1255,11 +1396,92 @@ static void disconnect(struct usb_interface *intf)
1255 dev_dbg(&intf->dev, "disconnected\n"); 1396 dev_dbg(&intf->dev, "disconnected\n");
1256} 1397}
1257 1398
1399static void zd_usb_resume(struct zd_usb *usb)
1400{
1401 struct zd_mac *mac = zd_usb_to_mac(usb);
1402 int r;
1403
1404 dev_dbg_f(zd_usb_dev(usb), "\n");
1405
1406 r = zd_op_start(zd_usb_to_hw(usb));
1407 if (r < 0) {
1408 dev_warn(zd_usb_dev(usb), "Device resume failed "
1409 "with error code %d. Retrying...\n", r);
1410 if (usb->was_running)
1411 set_bit(ZD_DEVICE_RUNNING, &mac->flags);
1412 usb_queue_reset_device(usb->intf);
1413 return;
1414 }
1415
1416 if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
1417 r = zd_restore_settings(mac);
1418 if (r < 0) {
1419 dev_dbg(zd_usb_dev(usb),
1420 "failed to restore settings, %d\n", r);
1421 return;
1422 }
1423 }
1424}
1425
1426static void zd_usb_stop(struct zd_usb *usb)
1427{
1428 dev_dbg_f(zd_usb_dev(usb), "\n");
1429
1430 zd_op_stop(zd_usb_to_hw(usb));
1431
1432 zd_usb_disable_tx(usb);
1433 zd_usb_disable_rx(usb);
1434 zd_usb_disable_int(usb);
1435
1436 usb->initialized = 0;
1437}
1438
1439static int pre_reset(struct usb_interface *intf)
1440{
1441 struct ieee80211_hw *hw = usb_get_intfdata(intf);
1442 struct zd_mac *mac;
1443 struct zd_usb *usb;
1444
1445 if (!hw || intf->condition != USB_INTERFACE_BOUND)
1446 return 0;
1447
1448 mac = zd_hw_mac(hw);
1449 usb = &mac->chip.usb;
1450
1451 usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags);
1452
1453 zd_usb_stop(usb);
1454
1455 mutex_lock(&mac->chip.mutex);
1456 return 0;
1457}
1458
1459static int post_reset(struct usb_interface *intf)
1460{
1461 struct ieee80211_hw *hw = usb_get_intfdata(intf);
1462 struct zd_mac *mac;
1463 struct zd_usb *usb;
1464
1465 if (!hw || intf->condition != USB_INTERFACE_BOUND)
1466 return 0;
1467
1468 mac = zd_hw_mac(hw);
1469 usb = &mac->chip.usb;
1470
1471 mutex_unlock(&mac->chip.mutex);
1472
1473 if (usb->was_running)
1474 zd_usb_resume(usb);
1475 return 0;
1476}
1477
1258static struct usb_driver driver = { 1478static struct usb_driver driver = {
1259 .name = KBUILD_MODNAME, 1479 .name = KBUILD_MODNAME,
1260 .id_table = usb_ids, 1480 .id_table = usb_ids,
1261 .probe = probe, 1481 .probe = probe,
1262 .disconnect = disconnect, 1482 .disconnect = disconnect,
1483 .pre_reset = pre_reset,
1484 .post_reset = post_reset,
1263}; 1485};
1264 1486
1265struct workqueue_struct *zd_workqueue; 1487struct workqueue_struct *zd_workqueue;
@@ -1393,30 +1615,35 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1393 return -EWOULDBLOCK; 1615 return -EWOULDBLOCK;
1394 } 1616 }
1395 if (!usb_int_enabled(usb)) { 1617 if (!usb_int_enabled(usb)) {
1396 dev_dbg_f(zd_usb_dev(usb), 1618 dev_dbg_f(zd_usb_dev(usb),
1397 "error: usb interrupt not enabled\n"); 1619 "error: usb interrupt not enabled\n");
1398 return -EWOULDBLOCK; 1620 return -EWOULDBLOCK;
1399 } 1621 }
1400 1622
1623 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1624 BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT *
1625 sizeof(__le16) > sizeof(usb->req_buf));
1626 BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) >
1627 sizeof(usb->req_buf));
1628
1401 req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16); 1629 req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
1402 req = kmalloc(req_len, GFP_KERNEL); 1630 req = (void *)usb->req_buf;
1403 if (!req) 1631
1404 return -ENOMEM;
1405 req->id = cpu_to_le16(USB_REQ_READ_REGS); 1632 req->id = cpu_to_le16(USB_REQ_READ_REGS);
1406 for (i = 0; i < count; i++) 1633 for (i = 0; i < count; i++)
1407 req->addr[i] = cpu_to_le16((u16)addresses[i]); 1634 req->addr[i] = cpu_to_le16((u16)addresses[i]);
1408 1635
1409 udev = zd_usb_to_usbdev(usb); 1636 udev = zd_usb_to_usbdev(usb);
1410 prepare_read_regs_int(usb); 1637 prepare_read_regs_int(usb);
1411 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), 1638 r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
1412 req, req_len, &actual_req_len, 1000 /* ms */); 1639 req, req_len, &actual_req_len, 50 /* ms */);
1413 if (r) { 1640 if (r) {
1414 dev_dbg_f(zd_usb_dev(usb), 1641 dev_dbg_f(zd_usb_dev(usb),
1415 "error in usb_bulk_msg(). Error number %d\n", r); 1642 "error in usb_interrupt_msg(). Error number %d\n", r);
1416 goto error; 1643 goto error;
1417 } 1644 }
1418 if (req_len != actual_req_len) { 1645 if (req_len != actual_req_len) {
1419 dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n" 1646 dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
1420 " req_len %d != actual_req_len %d\n", 1647 " req_len %d != actual_req_len %d\n",
1421 req_len, actual_req_len); 1648 req_len, actual_req_len);
1422 r = -EIO; 1649 r = -EIO;
@@ -1424,7 +1651,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1424 } 1651 }
1425 1652
1426 timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion, 1653 timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
1427 msecs_to_jiffies(1000)); 1654 msecs_to_jiffies(50));
1428 if (!timeout) { 1655 if (!timeout) {
1429 disable_read_regs_int(usb); 1656 disable_read_regs_int(usb);
1430 dev_dbg_f(zd_usb_dev(usb), "read timed out\n"); 1657 dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
@@ -1434,17 +1661,106 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1434 1661
1435 r = get_results(usb, values, req, count); 1662 r = get_results(usb, values, req, count);
1436error: 1663error:
1437 kfree(req);
1438 return r; 1664 return r;
1439} 1665}
1440 1666
1441int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, 1667static void iowrite16v_urb_complete(struct urb *urb)
1442 unsigned int count) 1668{
1669 struct zd_usb *usb = urb->context;
1670
1671 if (urb->status && !usb->cmd_error)
1672 usb->cmd_error = urb->status;
1673}
1674
1675static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
1676{
1677 int r = 0;
1678 struct urb *urb = usb->urb_async_waiting;
1679
1680 if (!urb)
1681 return 0;
1682
1683 usb->urb_async_waiting = NULL;
1684
1685 if (!last)
1686 urb->transfer_flags |= URB_NO_INTERRUPT;
1687
1688 usb_anchor_urb(urb, &usb->submitted_cmds);
1689 r = usb_submit_urb(urb, GFP_KERNEL);
1690 if (r) {
1691 usb_unanchor_urb(urb);
1692 dev_dbg_f(zd_usb_dev(usb),
1693 "error in usb_submit_urb(). Error number %d\n", r);
1694 goto error;
1695 }
1696
1697 /* fall-through with r == 0 */
1698error:
1699 usb_free_urb(urb);
1700 return r;
1701}
1702
1703void zd_usb_iowrite16v_async_start(struct zd_usb *usb)
1704{
1705 ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds));
1706 ZD_ASSERT(usb->urb_async_waiting == NULL);
1707 ZD_ASSERT(!usb->in_async);
1708
1709 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1710
1711 usb->in_async = 1;
1712 usb->cmd_error = 0;
1713 usb->urb_async_waiting = NULL;
1714}
1715
1716int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout)
1717{
1718 int r;
1719
1720 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1721 ZD_ASSERT(usb->in_async);
1722
1723 /* Submit last iowrite16v URB */
1724 r = zd_submit_waiting_urb(usb, true);
1725 if (r) {
1726 dev_dbg_f(zd_usb_dev(usb),
1727 "error in zd_submit_waiting_usb(). "
1728 "Error number %d\n", r);
1729
1730 usb_kill_anchored_urbs(&usb->submitted_cmds);
1731 goto error;
1732 }
1733
1734 if (timeout)
1735 timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds,
1736 timeout);
1737 if (!timeout) {
1738 usb_kill_anchored_urbs(&usb->submitted_cmds);
1739 if (usb->cmd_error == -ENOENT) {
1740 dev_dbg_f(zd_usb_dev(usb), "timed out");
1741 r = -ETIMEDOUT;
1742 goto error;
1743 }
1744 }
1745
1746 r = usb->cmd_error;
1747error:
1748 usb->in_async = 0;
1749 return r;
1750}
1751
1752int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1753 unsigned int count)
1443{ 1754{
1444 int r; 1755 int r;
1445 struct usb_device *udev; 1756 struct usb_device *udev;
1446 struct usb_req_write_regs *req = NULL; 1757 struct usb_req_write_regs *req = NULL;
1447 int i, req_len, actual_req_len; 1758 int i, req_len;
1759 struct urb *urb;
1760 struct usb_host_endpoint *ep;
1761
1762 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1763 ZD_ASSERT(usb->in_async);
1448 1764
1449 if (count == 0) 1765 if (count == 0)
1450 return 0; 1766 return 0;
@@ -1460,11 +1776,23 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1460 return -EWOULDBLOCK; 1776 return -EWOULDBLOCK;
1461 } 1777 }
1462 1778
1779 udev = zd_usb_to_usbdev(usb);
1780
1781 ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT));
1782 if (!ep)
1783 return -ENOENT;
1784
1785 urb = usb_alloc_urb(0, GFP_KERNEL);
1786 if (!urb)
1787 return -ENOMEM;
1788
1463 req_len = sizeof(struct usb_req_write_regs) + 1789 req_len = sizeof(struct usb_req_write_regs) +
1464 count * sizeof(struct reg_data); 1790 count * sizeof(struct reg_data);
1465 req = kmalloc(req_len, GFP_KERNEL); 1791 req = kmalloc(req_len, GFP_KERNEL);
1466 if (!req) 1792 if (!req) {
1467 return -ENOMEM; 1793 r = -ENOMEM;
1794 goto error;
1795 }
1468 1796
1469 req->id = cpu_to_le16(USB_REQ_WRITE_REGS); 1797 req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
1470 for (i = 0; i < count; i++) { 1798 for (i = 0; i < count; i++) {
@@ -1473,29 +1801,44 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1473 rw->value = cpu_to_le16(ioreqs[i].value); 1801 rw->value = cpu_to_le16(ioreqs[i].value);
1474 } 1802 }
1475 1803
1476 udev = zd_usb_to_usbdev(usb); 1804 usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
1477 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), 1805 req, req_len, iowrite16v_urb_complete, usb,
1478 req, req_len, &actual_req_len, 1000 /* ms */); 1806 ep->desc.bInterval);
1807 urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
1808
1809 /* Submit previous URB */
1810 r = zd_submit_waiting_urb(usb, false);
1479 if (r) { 1811 if (r) {
1480 dev_dbg_f(zd_usb_dev(usb), 1812 dev_dbg_f(zd_usb_dev(usb),
1481 "error in usb_bulk_msg(). Error number %d\n", r); 1813 "error in zd_submit_waiting_usb(). "
1482 goto error; 1814 "Error number %d\n", r);
1483 }
1484 if (req_len != actual_req_len) {
1485 dev_dbg_f(zd_usb_dev(usb),
1486 "error in usb_bulk_msg()"
1487 " req_len %d != actual_req_len %d\n",
1488 req_len, actual_req_len);
1489 r = -EIO;
1490 goto error; 1815 goto error;
1491 } 1816 }
1492 1817
1493 /* FALL-THROUGH with r == 0 */ 1818 /* Delay submit so that URB_NO_INTERRUPT flag can be set for all URBs
1819 * of currect batch except for very last.
1820 */
1821 usb->urb_async_waiting = urb;
1822 return 0;
1494error: 1823error:
1495 kfree(req); 1824 usb_free_urb(urb);
1496 return r; 1825 return r;
1497} 1826}
1498 1827
1828int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1829 unsigned int count)
1830{
1831 int r;
1832
1833 zd_usb_iowrite16v_async_start(usb);
1834 r = zd_usb_iowrite16v_async(usb, ioreqs, count);
1835 if (r) {
1836 zd_usb_iowrite16v_async_end(usb, 0);
1837 return r;
1838 }
1839 return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
1840}
1841
1499int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) 1842int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1500{ 1843{
1501 int r; 1844 int r;
@@ -1537,14 +1880,19 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1537 if (r) { 1880 if (r) {
1538 dev_dbg_f(zd_usb_dev(usb), 1881 dev_dbg_f(zd_usb_dev(usb),
1539 "error %d: Couldn't read CR203\n", r); 1882 "error %d: Couldn't read CR203\n", r);
1540 goto out; 1883 return r;
1541 } 1884 }
1542 bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA); 1885 bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
1543 1886
1887 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1888 BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) +
1889 USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) >
1890 sizeof(usb->req_buf));
1891 BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) >
1892 sizeof(usb->req_buf));
1893
1544 req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16); 1894 req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
1545 req = kmalloc(req_len, GFP_KERNEL); 1895 req = (void *)usb->req_buf;
1546 if (!req)
1547 return -ENOMEM;
1548 1896
1549 req->id = cpu_to_le16(USB_REQ_WRITE_RF); 1897 req->id = cpu_to_le16(USB_REQ_WRITE_RF);
1550 /* 1: 3683a, but not used in ZYDAS driver */ 1898 /* 1: 3683a, but not used in ZYDAS driver */
@@ -1559,15 +1907,15 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1559 } 1907 }
1560 1908
1561 udev = zd_usb_to_usbdev(usb); 1909 udev = zd_usb_to_usbdev(usb);
1562 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), 1910 r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
1563 req, req_len, &actual_req_len, 1000 /* ms */); 1911 req, req_len, &actual_req_len, 50 /* ms */);
1564 if (r) { 1912 if (r) {
1565 dev_dbg_f(zd_usb_dev(usb), 1913 dev_dbg_f(zd_usb_dev(usb),
1566 "error in usb_bulk_msg(). Error number %d\n", r); 1914 "error in usb_interrupt_msg(). Error number %d\n", r);
1567 goto out; 1915 goto out;
1568 } 1916 }
1569 if (req_len != actual_req_len) { 1917 if (req_len != actual_req_len) {
1570 dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()" 1918 dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
1571 " req_len %d != actual_req_len %d\n", 1919 " req_len %d != actual_req_len %d\n",
1572 req_len, actual_req_len); 1920 req_len, actual_req_len);
1573 r = -EIO; 1921 r = -EIO;
@@ -1576,6 +1924,5 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1576 1924
1577 /* FALL-THROUGH with r == 0 */ 1925 /* FALL-THROUGH with r == 0 */
1578out: 1926out:
1579 kfree(req);
1580 return r; 1927 return r;
1581} 1928}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 1b1655cb7cb4..b3df2c8116cc 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -32,6 +32,10 @@
32#define ZD_USB_TX_HIGH 5 32#define ZD_USB_TX_HIGH 5
33#define ZD_USB_TX_LOW 2 33#define ZD_USB_TX_LOW 2
34 34
35#define ZD_TX_TIMEOUT (HZ * 5)
36#define ZD_TX_WATCHDOG_INTERVAL round_jiffies_relative(HZ)
37#define ZD_RX_IDLE_INTERVAL round_jiffies_relative(30 * HZ)
38
35enum devicetype { 39enum devicetype {
36 DEVICE_ZD1211 = 0, 40 DEVICE_ZD1211 = 0,
37 DEVICE_ZD1211B = 1, 41 DEVICE_ZD1211B = 1,
@@ -162,6 +166,8 @@ struct zd_usb_interrupt {
162 struct read_regs_int read_regs; 166 struct read_regs_int read_regs;
163 spinlock_t lock; 167 spinlock_t lock;
164 struct urb *urb; 168 struct urb *urb;
169 void *buffer;
170 dma_addr_t buffer_dma;
165 int interval; 171 int interval;
166 u8 read_regs_enabled:1; 172 u8 read_regs_enabled:1;
167}; 173};
@@ -175,7 +181,9 @@ static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
175 181
176struct zd_usb_rx { 182struct zd_usb_rx {
177 spinlock_t lock; 183 spinlock_t lock;
178 u8 fragment[2*USB_MAX_RX_SIZE]; 184 struct mutex setup_mutex;
185 struct delayed_work idle_work;
186 u8 fragment[2 * USB_MAX_RX_SIZE];
179 unsigned int fragment_length; 187 unsigned int fragment_length;
180 unsigned int usb_packet_size; 188 unsigned int usb_packet_size;
181 struct urb **urbs; 189 struct urb **urbs;
@@ -184,19 +192,21 @@ struct zd_usb_rx {
184 192
185/** 193/**
186 * struct zd_usb_tx - structure used for transmitting frames 194 * struct zd_usb_tx - structure used for transmitting frames
195 * @enabled: atomic enabled flag, indicates whether tx is enabled
187 * @lock: lock for transmission 196 * @lock: lock for transmission
188 * @free_urb_list: list of free URBs, contains all the URBs, which can be used 197 * @submitted: anchor for URBs sent to device
189 * @submitted_urbs: atomic integer that counts the URBs having sent to the 198 * @submitted_urbs: atomic integer that counts the URBs having sent to the
190 * device, which haven't been completed 199 * device, which haven't been completed
191 * @enabled: enabled flag, indicates whether tx is enabled
192 * @stopped: indicates whether higher level tx queues are stopped 200 * @stopped: indicates whether higher level tx queues are stopped
193 */ 201 */
194struct zd_usb_tx { 202struct zd_usb_tx {
203 atomic_t enabled;
195 spinlock_t lock; 204 spinlock_t lock;
196 struct list_head free_urb_list; 205 struct delayed_work watchdog_work;
206 struct sk_buff_head submitted_skbs;
207 struct usb_anchor submitted;
197 int submitted_urbs; 208 int submitted_urbs;
198 int enabled; 209 u8 stopped:1, watchdog_enabled:1;
199 int stopped;
200}; 210};
201 211
202/* Contains the usb parts. The structure doesn't require a lock because intf 212/* Contains the usb parts. The structure doesn't require a lock because intf
@@ -207,7 +217,11 @@ struct zd_usb {
207 struct zd_usb_rx rx; 217 struct zd_usb_rx rx;
208 struct zd_usb_tx tx; 218 struct zd_usb_tx tx;
209 struct usb_interface *intf; 219 struct usb_interface *intf;
210 u8 is_zd1211b:1, initialized:1; 220 struct usb_anchor submitted_cmds;
221 struct urb *urb_async_waiting;
222 int cmd_error;
223 u8 req_buf[64]; /* zd_usb_iowrite16v needs 62 bytes */
224 u8 is_zd1211b:1, initialized:1, was_running:1, in_async:1;
211}; 225};
212 226
213#define zd_usb_dev(usb) (&usb->intf->dev) 227#define zd_usb_dev(usb) (&usb->intf->dev)
@@ -234,12 +248,17 @@ void zd_usb_clear(struct zd_usb *usb);
234 248
235int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size); 249int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
236 250
251void zd_tx_watchdog_enable(struct zd_usb *usb);
252void zd_tx_watchdog_disable(struct zd_usb *usb);
253
237int zd_usb_enable_int(struct zd_usb *usb); 254int zd_usb_enable_int(struct zd_usb *usb);
238void zd_usb_disable_int(struct zd_usb *usb); 255void zd_usb_disable_int(struct zd_usb *usb);
239 256
240int zd_usb_enable_rx(struct zd_usb *usb); 257int zd_usb_enable_rx(struct zd_usb *usb);
241void zd_usb_disable_rx(struct zd_usb *usb); 258void zd_usb_disable_rx(struct zd_usb *usb);
242 259
260void zd_usb_reset_rx_idle_timer(struct zd_usb *usb);
261
243void zd_usb_enable_tx(struct zd_usb *usb); 262void zd_usb_enable_tx(struct zd_usb *usb);
244void zd_usb_disable_tx(struct zd_usb *usb); 263void zd_usb_disable_tx(struct zd_usb *usb);
245 264
@@ -254,6 +273,10 @@ static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
254 return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1); 273 return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
255} 274}
256 275
276void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
277int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout);
278int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
279 unsigned int count);
257int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, 280int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
258 unsigned int count); 281 unsigned int count);
259 282
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
new file mode 100644
index 000000000000..e346e8125ef5
--- /dev/null
+++ b/drivers/net/xen-netback/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
2
3xen-netback-y := netback.o xenbus.o interface.o
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
new file mode 100644
index 000000000000..5d7bbf2b2ee7
--- /dev/null
+++ b/drivers/net/xen-netback/common.h
@@ -0,0 +1,161 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License version 2
4 * as published by the Free Software Foundation; or, when distributed
5 * separately from the Linux kernel or incorporated into other
6 * software packages, subject to the following license:
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this source file (the "Software"), to deal in the Software without
10 * restriction, including without limitation the rights to use, copy, modify,
11 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * IN THE SOFTWARE.
25 */
26
27#ifndef __XEN_NETBACK__COMMON_H__
28#define __XEN_NETBACK__COMMON_H__
29
30#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
31
32#include <linux/module.h>
33#include <linux/interrupt.h>
34#include <linux/slab.h>
35#include <linux/ip.h>
36#include <linux/in.h>
37#include <linux/io.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/wait.h>
41#include <linux/sched.h>
42
43#include <xen/interface/io/netif.h>
44#include <xen/interface/grant_table.h>
45#include <xen/grant_table.h>
46#include <xen/xenbus.h>
47
48struct xen_netbk;
49
50struct xenvif {
51 /* Unique identifier for this interface. */
52 domid_t domid;
53 unsigned int handle;
54
55 /* Reference to netback processing backend. */
56 struct xen_netbk *netbk;
57
58 u8 fe_dev_addr[6];
59
60 /* Physical parameters of the comms window. */
61 grant_handle_t tx_shmem_handle;
62 grant_ref_t tx_shmem_ref;
63 grant_handle_t rx_shmem_handle;
64 grant_ref_t rx_shmem_ref;
65 unsigned int irq;
66
67 /* List of frontends to notify after a batch of frames sent. */
68 struct list_head notify_list;
69
70 /* The shared rings and indexes. */
71 struct xen_netif_tx_back_ring tx;
72 struct xen_netif_rx_back_ring rx;
73 struct vm_struct *tx_comms_area;
74 struct vm_struct *rx_comms_area;
75
76 /* Flags that must not be set in dev->features */
77 u32 features_disabled;
78
79 /* Frontend feature information. */
80 u8 can_sg:1;
81 u8 gso:1;
82 u8 gso_prefix:1;
83 u8 csum:1;
84
85 /* Internal feature information. */
86 u8 can_queue:1; /* can queue packets for receiver? */
87
88 /*
89 * Allow xenvif_start_xmit() to peek ahead in the rx request
90 * ring. This is a prediction of what rx_req_cons will be
91 * once all queued skbs are put on the ring.
92 */
93 RING_IDX rx_req_cons_peek;
94
95 /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
96 unsigned long credit_bytes;
97 unsigned long credit_usec;
98 unsigned long remaining_credit;
99 struct timer_list credit_timeout;
100
101 /* Statistics */
102 unsigned long rx_gso_checksum_fixup;
103
104 /* Miscellaneous private stuff. */
105 struct list_head schedule_list;
106 atomic_t refcnt;
107 struct net_device *dev;
108
109 wait_queue_head_t waiting_to_free;
110};
111
112#define XEN_NETIF_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
113#define XEN_NETIF_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
114
115struct xenvif *xenvif_alloc(struct device *parent,
116 domid_t domid,
117 unsigned int handle);
118
119int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
120 unsigned long rx_ring_ref, unsigned int evtchn);
121void xenvif_disconnect(struct xenvif *vif);
122
123void xenvif_get(struct xenvif *vif);
124void xenvif_put(struct xenvif *vif);
125
126int xenvif_xenbus_init(void);
127
128int xenvif_schedulable(struct xenvif *vif);
129
130int xen_netbk_rx_ring_full(struct xenvif *vif);
131
132int xen_netbk_must_stop_queue(struct xenvif *vif);
133
134/* (Un)Map communication rings. */
135void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
136int xen_netbk_map_frontend_rings(struct xenvif *vif,
137 grant_ref_t tx_ring_ref,
138 grant_ref_t rx_ring_ref);
139
140/* (De)Register a xenvif with the netback backend. */
141void xen_netbk_add_xenvif(struct xenvif *vif);
142void xen_netbk_remove_xenvif(struct xenvif *vif);
143
144/* (De)Schedule backend processing for a xenvif */
145void xen_netbk_schedule_xenvif(struct xenvif *vif);
146void xen_netbk_deschedule_xenvif(struct xenvif *vif);
147
148/* Check for SKBs from frontend and schedule backend processing */
149void xen_netbk_check_rx_xenvif(struct xenvif *vif);
150/* Receive an SKB from the frontend */
151void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
152
153/* Queue an SKB for transmission to the frontend */
154void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
155/* Notify xenvif that ring now has space to send an skb to the frontend */
156void xenvif_notify_tx_completion(struct xenvif *vif);
157
158/* Returns number of ring slots required to send an skb to the frontend */
159unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
160
161#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
new file mode 100644
index 000000000000..de569cc19da4
--- /dev/null
+++ b/drivers/net/xen-netback/interface.c
@@ -0,0 +1,424 @@
1/*
2 * Network-device interface management.
3 *
4 * Copyright (c) 2004-2005, Keir Fraser
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation; or, when distributed
9 * separately from the Linux kernel or incorporated into other
10 * software packages, subject to the following license:
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this source file (the "Software"), to deal in the Software without
14 * restriction, including without limitation the rights to use, copy, modify,
15 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
16 * and to permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 * IN THE SOFTWARE.
29 */
30
31#include "common.h"
32
33#include <linux/ethtool.h>
34#include <linux/rtnetlink.h>
35#include <linux/if_vlan.h>
36
37#include <xen/events.h>
38#include <asm/xen/hypercall.h>
39
40#define XENVIF_QUEUE_LENGTH 32
41
42void xenvif_get(struct xenvif *vif)
43{
44 atomic_inc(&vif->refcnt);
45}
46
47void xenvif_put(struct xenvif *vif)
48{
49 if (atomic_dec_and_test(&vif->refcnt))
50 wake_up(&vif->waiting_to_free);
51}
52
53int xenvif_schedulable(struct xenvif *vif)
54{
55 return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
56}
57
58static int xenvif_rx_schedulable(struct xenvif *vif)
59{
60 return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
61}
62
63static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
64{
65 struct xenvif *vif = dev_id;
66
67 if (vif->netbk == NULL)
68 return IRQ_NONE;
69
70 xen_netbk_schedule_xenvif(vif);
71
72 if (xenvif_rx_schedulable(vif))
73 netif_wake_queue(vif->dev);
74
75 return IRQ_HANDLED;
76}
77
78static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
79{
80 struct xenvif *vif = netdev_priv(dev);
81
82 BUG_ON(skb->dev != dev);
83
84 if (vif->netbk == NULL)
85 goto drop;
86
87 /* Drop the packet if the target domain has no receive buffers. */
88 if (!xenvif_rx_schedulable(vif))
89 goto drop;
90
91 /* Reserve ring slots for the worst-case number of fragments. */
92 vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
93 xenvif_get(vif);
94
95 if (vif->can_queue && xen_netbk_must_stop_queue(vif))
96 netif_stop_queue(dev);
97
98 xen_netbk_queue_tx_skb(vif, skb);
99
100 return NETDEV_TX_OK;
101
102 drop:
103 vif->dev->stats.tx_dropped++;
104 dev_kfree_skb(skb);
105 return NETDEV_TX_OK;
106}
107
108void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
109{
110 netif_rx_ni(skb);
111}
112
113void xenvif_notify_tx_completion(struct xenvif *vif)
114{
115 if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
116 netif_wake_queue(vif->dev);
117}
118
119static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
120{
121 struct xenvif *vif = netdev_priv(dev);
122 return &vif->dev->stats;
123}
124
125static void xenvif_up(struct xenvif *vif)
126{
127 xen_netbk_add_xenvif(vif);
128 enable_irq(vif->irq);
129 xen_netbk_check_rx_xenvif(vif);
130}
131
132static void xenvif_down(struct xenvif *vif)
133{
134 disable_irq(vif->irq);
135 xen_netbk_deschedule_xenvif(vif);
136 xen_netbk_remove_xenvif(vif);
137}
138
139static int xenvif_open(struct net_device *dev)
140{
141 struct xenvif *vif = netdev_priv(dev);
142 if (netif_carrier_ok(dev))
143 xenvif_up(vif);
144 netif_start_queue(dev);
145 return 0;
146}
147
148static int xenvif_close(struct net_device *dev)
149{
150 struct xenvif *vif = netdev_priv(dev);
151 if (netif_carrier_ok(dev))
152 xenvif_down(vif);
153 netif_stop_queue(dev);
154 return 0;
155}
156
157static int xenvif_change_mtu(struct net_device *dev, int mtu)
158{
159 struct xenvif *vif = netdev_priv(dev);
160 int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;
161
162 if (mtu > max)
163 return -EINVAL;
164 dev->mtu = mtu;
165 return 0;
166}
167
168static void xenvif_set_features(struct xenvif *vif)
169{
170 struct net_device *dev = vif->dev;
171 u32 features = dev->features;
172
173 if (vif->can_sg)
174 features |= NETIF_F_SG;
175 if (vif->gso || vif->gso_prefix)
176 features |= NETIF_F_TSO;
177 if (vif->csum)
178 features |= NETIF_F_IP_CSUM;
179
180 features &= ~(vif->features_disabled);
181
182 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN)
183 dev->mtu = ETH_DATA_LEN;
184
185 dev->features = features;
186}
187
188static int xenvif_set_tx_csum(struct net_device *dev, u32 data)
189{
190 struct xenvif *vif = netdev_priv(dev);
191 if (data) {
192 if (!vif->csum)
193 return -EOPNOTSUPP;
194 vif->features_disabled &= ~NETIF_F_IP_CSUM;
195 } else {
196 vif->features_disabled |= NETIF_F_IP_CSUM;
197 }
198
199 xenvif_set_features(vif);
200 return 0;
201}
202
203static int xenvif_set_sg(struct net_device *dev, u32 data)
204{
205 struct xenvif *vif = netdev_priv(dev);
206 if (data) {
207 if (!vif->can_sg)
208 return -EOPNOTSUPP;
209 vif->features_disabled &= ~NETIF_F_SG;
210 } else {
211 vif->features_disabled |= NETIF_F_SG;
212 }
213
214 xenvif_set_features(vif);
215 return 0;
216}
217
218static int xenvif_set_tso(struct net_device *dev, u32 data)
219{
220 struct xenvif *vif = netdev_priv(dev);
221 if (data) {
222 if (!vif->gso && !vif->gso_prefix)
223 return -EOPNOTSUPP;
224 vif->features_disabled &= ~NETIF_F_TSO;
225 } else {
226 vif->features_disabled |= NETIF_F_TSO;
227 }
228
229 xenvif_set_features(vif);
230 return 0;
231}
232
233static const struct xenvif_stat {
234 char name[ETH_GSTRING_LEN];
235 u16 offset;
236} xenvif_stats[] = {
237 {
238 "rx_gso_checksum_fixup",
239 offsetof(struct xenvif, rx_gso_checksum_fixup)
240 },
241};
242
243static int xenvif_get_sset_count(struct net_device *dev, int string_set)
244{
245 switch (string_set) {
246 case ETH_SS_STATS:
247 return ARRAY_SIZE(xenvif_stats);
248 default:
249 return -EINVAL;
250 }
251}
252
253static void xenvif_get_ethtool_stats(struct net_device *dev,
254 struct ethtool_stats *stats, u64 * data)
255{
256 void *vif = netdev_priv(dev);
257 int i;
258
259 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
260 data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
261}
262
263static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
264{
265 int i;
266
267 switch (stringset) {
268 case ETH_SS_STATS:
269 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
270 memcpy(data + i * ETH_GSTRING_LEN,
271 xenvif_stats[i].name, ETH_GSTRING_LEN);
272 break;
273 }
274}
275
276static struct ethtool_ops xenvif_ethtool_ops = {
277 .get_tx_csum = ethtool_op_get_tx_csum,
278 .set_tx_csum = xenvif_set_tx_csum,
279 .get_sg = ethtool_op_get_sg,
280 .set_sg = xenvif_set_sg,
281 .get_tso = ethtool_op_get_tso,
282 .set_tso = xenvif_set_tso,
283 .get_link = ethtool_op_get_link,
284
285 .get_sset_count = xenvif_get_sset_count,
286 .get_ethtool_stats = xenvif_get_ethtool_stats,
287 .get_strings = xenvif_get_strings,
288};
289
290static struct net_device_ops xenvif_netdev_ops = {
291 .ndo_start_xmit = xenvif_start_xmit,
292 .ndo_get_stats = xenvif_get_stats,
293 .ndo_open = xenvif_open,
294 .ndo_stop = xenvif_close,
295 .ndo_change_mtu = xenvif_change_mtu,
296};
297
298struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
299 unsigned int handle)
300{
301 int err;
302 struct net_device *dev;
303 struct xenvif *vif;
304 char name[IFNAMSIZ] = {};
305
306 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
307 dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
308 if (dev == NULL) {
309 pr_warn("Could not allocate netdev\n");
310 return ERR_PTR(-ENOMEM);
311 }
312
313 SET_NETDEV_DEV(dev, parent);
314
315 vif = netdev_priv(dev);
316 vif->domid = domid;
317 vif->handle = handle;
318 vif->netbk = NULL;
319 vif->can_sg = 1;
320 vif->csum = 1;
321 atomic_set(&vif->refcnt, 1);
322 init_waitqueue_head(&vif->waiting_to_free);
323 vif->dev = dev;
324 INIT_LIST_HEAD(&vif->schedule_list);
325 INIT_LIST_HEAD(&vif->notify_list);
326
327 vif->credit_bytes = vif->remaining_credit = ~0UL;
328 vif->credit_usec = 0UL;
329 init_timer(&vif->credit_timeout);
330 /* Initialize 'expires' now: it's used to track the credit window. */
331 vif->credit_timeout.expires = jiffies;
332
333 dev->netdev_ops = &xenvif_netdev_ops;
334 xenvif_set_features(vif);
335 SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
336
337 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
338
339 /*
340 * Initialise a dummy MAC address. We choose the numerically
341 * largest non-broadcast address to prevent the address getting
342 * stolen by an Ethernet bridge for STP purposes.
343 * (FE:FF:FF:FF:FF:FF)
344 */
345 memset(dev->dev_addr, 0xFF, ETH_ALEN);
346 dev->dev_addr[0] &= ~0x01;
347
348 netif_carrier_off(dev);
349
350 err = register_netdev(dev);
351 if (err) {
352 netdev_warn(dev, "Could not register device: err=%d\n", err);
353 free_netdev(dev);
354 return ERR_PTR(err);
355 }
356
357 netdev_dbg(dev, "Successfully created xenvif\n");
358 return vif;
359}
360
361int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
362 unsigned long rx_ring_ref, unsigned int evtchn)
363{
364 int err = -ENOMEM;
365
366 /* Already connected through? */
367 if (vif->irq)
368 return 0;
369
370 xenvif_set_features(vif);
371
372 err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
373 if (err < 0)
374 goto err;
375
376 err = bind_interdomain_evtchn_to_irqhandler(
377 vif->domid, evtchn, xenvif_interrupt, 0,
378 vif->dev->name, vif);
379 if (err < 0)
380 goto err_unmap;
381 vif->irq = err;
382 disable_irq(vif->irq);
383
384 xenvif_get(vif);
385
386 rtnl_lock();
387 netif_carrier_on(vif->dev);
388 if (netif_running(vif->dev))
389 xenvif_up(vif);
390 rtnl_unlock();
391
392 return 0;
393err_unmap:
394 xen_netbk_unmap_frontend_rings(vif);
395err:
396 return err;
397}
398
399void xenvif_disconnect(struct xenvif *vif)
400{
401 struct net_device *dev = vif->dev;
402 if (netif_carrier_ok(dev)) {
403 rtnl_lock();
404 netif_carrier_off(dev); /* discard queued packets */
405 if (netif_running(dev))
406 xenvif_down(vif);
407 rtnl_unlock();
408 xenvif_put(vif);
409 }
410
411 atomic_dec(&vif->refcnt);
412 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
413
414 del_timer_sync(&vif->credit_timeout);
415
416 if (vif->irq)
417 unbind_from_irqhandler(vif->irq, vif);
418
419 unregister_netdev(vif->dev);
420
421 xen_netbk_unmap_frontend_rings(vif);
422
423 free_netdev(vif->dev);
424}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
new file mode 100644
index 000000000000..0e4851b8a773
--- /dev/null
+++ b/drivers/net/xen-netback/netback.c
@@ -0,0 +1,1745 @@
1/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40
41#include <net/tcp.h>
42
43#include <xen/events.h>
44#include <xen/interface/memory.h>
45
46#include <asm/xen/hypercall.h>
47#include <asm/xen/page.h>
48
/*
 * Bookkeeping for one in-flight TX request: a copy of the ring request
 * plus the vif it arrived on (needed later to build the response).
 */
struct pending_tx_info {
	struct xen_netif_tx_request req;
	struct xenvif *vif;
};
/* Free-running counter wrapped onto pending_ring[] by pending_index(). */
typedef unsigned int pending_ring_idx_t;

/* Per-buffer state accumulated while building RX responses. */
struct netbk_rx_meta {
	int id;       /* frontend request id echoed back in the response */
	int size;     /* bytes placed in this buffer so far */
	int gso_size; /* nonzero on the slot carrying GSO information */
};
60
61#define MAX_PENDING_REQS 256
62
63#define MAX_BUFFER_OFFSET PAGE_SIZE
64
/*
 * Cookie packed into page->mapping (otherwise unused for these pages):
 * identifies which netbk group and which pending-request index a TX
 * page belongs to.  Encoded by set_page_ext(), decoded by
 * get_page_ext().
 */
union page_ext {
	struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH 8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
		unsigned int group:GROUP_WIDTH;
		unsigned int idx:IDX_WIDTH;
#else
		unsigned int group, idx;
#endif
	} e;
	void *mapping;
};
79
/*
 * One backend worker group: a kthread plus all the state needed to
 * shuttle packets between the frontends assigned to this group and
 * the host network stack.
 */
struct xen_netbk {
	wait_queue_head_t wq;		/* worker kthread sleeps here */
	struct task_struct *task;

	struct sk_buff_head rx_queue;	/* guest-bound packets */
	struct sk_buff_head tx_queue;

	struct timer_list net_timer;

	/* Pages backing in-flight TX requests, indexed by pending idx. */
	struct page *mmap_pages[MAX_PENDING_REQS];

	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	struct list_head net_schedule_list;

	/* Protect the net_schedule_list in netif. */
	spinlock_t net_schedule_list_lock;

	/* Number of frontends currently assigned to this group. */
	atomic_t netfront_count;

	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];

	/* Ring of pending indices: produced on release, consumed on use. */
	u16 pending_ring[MAX_PENDING_REQS];

	/*
	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
	 * head/fragment page uses 2 copy operations because it
	 * straddles two buffers in the frontend.
	 */
	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};
113
114static struct xen_netbk *xen_netbk;
115static int xen_netbk_group_nr;
116
117void xen_netbk_add_xenvif(struct xenvif *vif)
118{
119 int i;
120 int min_netfront_count;
121 int min_group = 0;
122 struct xen_netbk *netbk;
123
124 min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
125 for (i = 0; i < xen_netbk_group_nr; i++) {
126 int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
127 if (netfront_count < min_netfront_count) {
128 min_group = i;
129 min_netfront_count = netfront_count;
130 }
131 }
132
133 netbk = &xen_netbk[min_group];
134
135 vif->netbk = netbk;
136 atomic_inc(&netbk->netfront_count);
137}
138
/* Detach a vif from its backend group, undoing xen_netbk_add_xenvif(). */
void xen_netbk_remove_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	vif->netbk = NULL;
	atomic_dec(&netbk->netfront_count);
}
145
146static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
147static void make_tx_response(struct xenvif *vif,
148 struct xen_netif_tx_request *txp,
149 s8 st);
150static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
151 u16 id,
152 s8 st,
153 u16 offset,
154 u16 size,
155 u16 flags);
156
/* Frame number of the page backing pending request @idx. */
static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
				       unsigned int idx)
{
	return page_to_pfn(netbk->mmap_pages[idx]);
}
162
/* Kernel virtual address of the page backing pending request @idx. */
static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
					 unsigned int idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
168
/*
 * Stash this page's netbk group and pending index in page->mapping.
 * The group is stored +1 so an all-zero mapping never decodes as a
 * valid cookie; see get_page_ext().
 */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
				unsigned int idx)
{
	unsigned int group = netbk - xen_netbk;
	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };

	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
	pg->mapping = ext.mapping;
}
179
180static int get_page_ext(struct page *pg,
181 unsigned int *pgroup, unsigned int *pidx)
182{
183 union page_ext ext = { .mapping = pg->mapping };
184 struct xen_netbk *netbk;
185 unsigned int group, idx;
186
187 group = ext.e.group - 1;
188
189 if (group < 0 || group >= xen_netbk_group_nr)
190 return 0;
191
192 netbk = &xen_netbk[group];
193
194 idx = ext.e.idx;
195
196 if ((idx < 0) || (idx >= MAX_PENDING_REQS))
197 return 0;
198
199 if (netbk->mmap_pages[idx] != pg)
200 return 0;
201
202 *pgroup = group;
203 *pidx = idx;
204
205 return 1;
206}
207
208/*
209 * This is the amount of packet we copy rather than map, so that the
210 * guest can't fiddle with the contents of the headers while we do
211 * packet processing on them (netfilter, routing, etc).
212 */
213#define PKT_PROT_LEN (ETH_HLEN + \
214 VLAN_HLEN + \
215 sizeof(struct iphdr) + MAX_IPOPTLEN + \
216 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
217
/* Wrap a free-running counter onto pending_ring[] (power-of-2 sized). */
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
222
/* Number of TX requests currently in flight for this group. */
static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
	return MAX_PENDING_REQS -
		netbk->pending_prod + netbk->pending_cons;
}
228
/* Wake the group's worker kthread to process queued work. */
static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
	wake_up(&netbk->wq);
}
233
/*
 * Worst-case number of RX ring slots one packet for this vif can
 * consume: one slot per MTU page, plus the extra-info slot and
 * per-frag slots when SG/GSO are enabled.
 */
static int max_required_rx_slots(struct xenvif *vif)
{
	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

	if (vif->can_sg || vif->gso || vif->gso_prefix)
		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

	return max;
}
243
/*
 * True when the frontend RX ring cannot be guaranteed to accept one
 * more worst-case packet: either too few requests have been posted or
 * the response ring itself would overrun.
 */
int xen_netbk_rx_ring_full(struct xenvif *vif)
{
	RING_IDX peek = vif->rx_req_cons_peek;
	RING_IDX needed = max_required_rx_slots(vif);

	return ((vif->rx.sring->req_prod - peek) < needed) ||
	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
252
/*
 * Decide whether the net device's TX queue must be stopped because the
 * frontend RX ring is full.  Arms the ring's req_event so the frontend
 * notifies us once enough requests are posted, then re-checks to close
 * the race with a concurrent producer.
 */
int xen_netbk_must_stop_queue(struct xenvif *vif)
{
	if (!xen_netbk_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */

	return xen_netbk_rx_ring_full(vif);
}
264
265/*
266 * Returns true if we should start a new receive buffer instead of
267 * adding 'size' bytes to a buffer which currently contains 'offset'
268 * bytes.
269 */
270static bool start_new_rx_buffer(int offset, unsigned long size, int head)
271{
272 /* simple case: we have completely filled the current buffer. */
273 if (offset == MAX_BUFFER_OFFSET)
274 return true;
275
276 /*
277 * complex case: start a fresh buffer if the current frag
278 * would overflow the current buffer but only if:
279 * (i) this frag would fit completely in the next buffer
280 * and (ii) there is already some data in the current buffer
281 * and (iii) this is not the head buffer.
282 *
283 * Where:
284 * - (i) stops us splitting a frag into two copies
285 * unless the frag is too large for a single buffer.
286 * - (ii) stops us from leaving a buffer pointlessly empty.
287 * - (iii) stops us leaving the first buffer
288 * empty. Strictly speaking this is already covered
289 * by (ii) but is explicitly checked because
290 * netfront relies on the first buffer being
291 * non-empty and can crash otherwise.
292 *
293 * This means we will effectively linearise small
294 * frags but do not needlessly split large buffers
295 * into multiple copies tend to give large frags their
296 * own buffers as before.
297 */
298 if ((offset + size > MAX_BUFFER_OFFSET) &&
299 (size <= MAX_BUFFER_OFFSET) && offset && !head)
300 return true;
301
302 return false;
303}
304
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	/* Slots for the linear area: one per page it touches. */
	count = DIV_ROUND_UP(
			offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);

	/*
	 * NOTE(review): this models the fill level of the last head
	 * buffer as headlen % PAGE_SIZE, while netbk_gop_frag_copy packs
	 * buffers by in-page data offset.  Confirm the two cannot
	 * disagree on the slot count for oddly aligned heads.
	 */
	copy_off = skb_headlen(skb) % PAGE_SIZE;

	/* One extra slot for the GSO descriptor. */
	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_shinfo(skb)->frags[i].size;
		unsigned long bytes;
		while (size > 0) {
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			if (start_new_rx_buffer(copy_off, size, 0)) {
				count++;
				copy_off = 0;
			}

			/* Consume as much of the frag as fits this buffer. */
			bytes = size;
			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;
			size -= bytes;
		}
	}
	return count;
}
344
/*
 * Scratch state shared between netbk_gop_skb() (producer of copy ops
 * and meta slots) and the response-building code (consumer).
 */
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;	/* grant-copy op produce/consume */
	unsigned meta_prod, meta_cons;	/* meta slot produce/consume */
	struct gnttab_copy *copy;	/* backing array for copy ops */
	struct netbk_rx_meta *meta;	/* backing array for meta slots */
	int copy_off;			/* fill level of current buffer */
	grant_ref_t copy_gref;		/* grant ref of current buffer */
};
353
/*
 * Consume the next RX request from the frontend and start a fresh
 * response buffer for it: allocate a new meta slot and reset the copy
 * offset and grant reference in @npo.
 */
static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						struct netrx_pending_operations *npo)
{
	struct netbk_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
372
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				struct netrx_pending_operations *npo,
				struct page *page, unsigned long size,
				unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct netbk_rx_meta *meta;
	/*
	 * These variables are used iff get_page_ext returns true,
	 * in which case they are guaranteed to be initialized.
	 */
	unsigned int uninitialized_var(group), uninitialized_var(idx);
	int foreign = get_page_ext(page, &group, &idx);
	unsigned long bytes;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE);

	/* Continue filling the buffer a previous call left off at. */
	meta = npo->meta + npo->meta_prod - 1;

	while (size > 0) {
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		if (start_new_rx_buffer(npo->copy_off, size, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		/* Copy as much as fits in the current buffer. */
		bytes = size;
		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		if (foreign) {
			/*
			 * A foreign TX page still owned by another guest:
			 * source the copy by grant reference.
			 */
			struct xen_netbk *netbk = &xen_netbk[group];
			struct pending_tx_info *src_pend;

			src_pend = &netbk->pending_tx_info[idx];

			copy_gop->source.domid = src_pend->vif->domid;
			copy_gop->source.u.ref = src_pend->req.gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			void *vaddr = page_address(page);
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
		}
		copy_gop->source.offset = offset;
		copy_gop->dest.domid = vif->domid;

		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;
		copy_gop->len = bytes;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Leave a gap for the GSO descriptor. */
		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */

	}
}
451
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	/* Head buffer of the packet. */
	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	/* Copy the linear area page by page. */
	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	/* Then each page fragment. */
	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_shinfo(skb)->frags[i].page,
				    skb_shinfo(skb)->frags[i].size,
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}
523
524/*
525 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
526 * used to set up the operations on the top of
527 * netrx_pending_operations, which have since been done. Check that
528 * they didn't give any errors and advance over them.
529 */
530static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
531 struct netrx_pending_operations *npo)
532{
533 struct gnttab_copy *copy_op;
534 int status = XEN_NETIF_RSP_OKAY;
535 int i;
536
537 for (i = 0; i < nr_meta_slots; i++) {
538 copy_op = npo->copy + npo->copy_cons++;
539 if (copy_op->status != GNTST_okay) {
540 netdev_dbg(vif->dev,
541 "Bad status %d from copy to DOM%d.\n",
542 copy_op->status, vif->domid);
543 status = XEN_NETIF_RSP_ERROR;
544 }
545 }
546
547 return status;
548}
549
550static void netbk_add_frag_responses(struct xenvif *vif, int status,
551 struct netbk_rx_meta *meta,
552 int nr_meta_slots)
553{
554 int i;
555 unsigned long offset;
556
557 /* No fragments used */
558 if (nr_meta_slots <= 1)
559 return;
560
561 nr_meta_slots--;
562
563 for (i = 0; i < nr_meta_slots; i++) {
564 int flags;
565 if (i == nr_meta_slots - 1)
566 flags = 0;
567 else
568 flags = XEN_NETRXF_more_data;
569
570 offset = 0;
571 make_rx_response(vif, meta[i].id, status, offset,
572 meta[i].size, flags);
573 }
574}
575
/* Per-skb scratch state stashed in skb->cb during RX processing. */
struct skb_cb_overlay {
	int meta_slots_used;	/* meta slots netbk_gop_skb() consumed */
};
579
/*
 * Worker-side RX path: drain guest-bound skbs from the group's
 * rx_queue, turn them into one batch of grant-copy operations, execute
 * the batch with a single hypercall, then write responses into each
 * vif's RX ring and notify the frontends that requested it.
 */
static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL, *tmp;
	s8 status;
	u16 irq, flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;

	struct netrx_pending_operations npo = {
		.copy = netbk->grant_copy_op,
		.meta = netbk->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	/* Phase 1: queue copy ops for as many skbs as fit in one batch. */
	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = netbk_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));

	if (!npo.copy_prod)
		return;

	/* Phase 2: a single hypercall performs every queued copy. */
	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
					npo.copy_prod);
	BUG_ON(ret != 0);

	/* Phase 3: turn the copy results into ring responses. */
	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		/* GSO prefix descriptor, if one was reserved for this skb. */
		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = netbk->meta[npo.meta_cons].gso_size;
			resp->id = netbk->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}


		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
					status, offset,
					netbk->meta[npo.meta_cons].size,
					flags);

		/* Trailing GSO extra-info slot for GSO packets. */
		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		netbk_add_frag_responses(vif, status,
					 netbk->meta + npo.meta_cons + 1,
					 sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
		/* NOTE(review): 'irq' is assigned but never read below —
		 * candidate for removal. */
		irq = vif->irq;
		if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify);

		xenvif_notify_tx_completion(vif);

		xenvif_put(vif);
		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

	/* Phase 4: one notification per vif that asked for one. */
	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
		notify_remote_via_irq(vif->irq);
		list_del_init(&vif->notify_list);
	}

	/* More work to do? */
	if (!skb_queue_empty(&netbk->rx_queue) &&
	    !timer_pending(&netbk->net_timer))
		xen_netbk_kick_thread(netbk);
}
715
/* Queue a guest-bound skb on the backend worker and wake it. */
void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	struct xen_netbk *netbk = vif->netbk;

	skb_queue_tail(&netbk->rx_queue, skb);

	xen_netbk_kick_thread(netbk);
}
724
/* net_timer expiry: poke the worker so it re-examines its queues. */
static void xen_netbk_alarm(unsigned long data)
{
	struct xen_netbk *netbk = (struct xen_netbk *)data;
	xen_netbk_kick_thread(netbk);
}
730
/* True if @vif is currently linked on a group's schedule list. */
static int __on_net_schedule_list(struct xenvif *vif)
{
	return !list_empty(&vif->schedule_list);
}
735
/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
	/* Drops the list's vif reference if it was actually queued. */
	if (likely(__on_net_schedule_list(vif))) {
		list_del_init(&vif->schedule_list);
		xenvif_put(vif);
	}
}
744
745static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
746{
747 struct xenvif *vif = NULL;
748
749 spin_lock_irq(&netbk->net_schedule_list_lock);
750 if (list_empty(&netbk->net_schedule_list))
751 goto out;
752
753 vif = list_first_entry(&netbk->net_schedule_list,
754 struct xenvif, schedule_list);
755 if (!vif)
756 goto out;
757
758 xenvif_get(vif);
759
760 remove_from_net_schedule_list(vif);
761out:
762 spin_unlock_irq(&netbk->net_schedule_list_lock);
763 return vif;
764}
765
/*
 * Put @vif on its group's schedule list (if absent and schedulable),
 * then kick the worker when there is both queued work and headroom in
 * the pending-request ring.
 */
void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
	unsigned long flags;
	struct xen_netbk *netbk = vif->netbk;

	/* Cheap unlocked check first; re-checked under the lock below. */
	if (__on_net_schedule_list(vif))
		goto kick;

	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
	if (!__on_net_schedule_list(vif) &&
	    likely(xenvif_schedulable(vif))) {
		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
		xenvif_get(vif);
	}
	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);

kick:
	smp_mb();
	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
	    !list_empty(&netbk->net_schedule_list))
		xen_netbk_kick_thread(netbk);
}
788
/* Remove @vif from its group's schedule list, taking the list lock. */
void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	spin_lock_irq(&netbk->net_schedule_list_lock);
	remove_from_net_schedule_list(vif);
	spin_unlock_irq(&netbk->net_schedule_list_lock);
}
796
/*
 * Final check of the vif's TX ring: if the frontend has produced more
 * requests, reschedule the vif; otherwise the RING macro arms the ring
 * event so we get notified when it does.
 */
void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		xen_netbk_schedule_xenvif(vif);
}
806
/* Replenish the vif's transmit credit for a new credit period. */
static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}
826
/* Credit timer callback: top up credit and restart TX processing. */
static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xen_netbk_check_rx_xenvif(vif);
}
833
/*
 * Fail a run of TX requests: respond with RSP_ERROR to @txp and every
 * following request up to ring index @end, then drop a vif reference
 * (presumably the one taken when the request chain was accepted —
 * verify against callers).
 */
static void netbk_tx_err(struct xenvif *vif,
			 struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons >= end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
	xen_netbk_check_rx_xenvif(vif);
	xenvif_put(vif);
}
849
/*
 * Walk the chain of XEN_NETTXF_more_data fragment requests following
 * @first, copying each into @txp[].  Returns the fragment count, or a
 * negative count on malformed input (budget exhausted, too many frags,
 * a fragment larger than the remaining packet, or one crossing a page
 * boundary).  Does not advance vif->tx.req_cons.
 */
static int netbk_count_requests(struct xenvif *vif,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txp,
				int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int frags = 0;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		if (frags >= work_to_do) {
			netdev_dbg(vif->dev, "Need more frags\n");
			return -frags;
		}

		if (unlikely(frags >= MAX_SKB_FRAGS)) {
			netdev_dbg(vif->dev, "Too many frags\n");
			return -frags;
		}

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
		       sizeof(*txp));
		if (txp->size > first->size) {
			netdev_dbg(vif->dev, "Frags galore\n");
			return -frags;
		}

		/* first->size ends up holding the head's own payload size. */
		first->size -= txp->size;
		frags++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			return -frags;
		}
	} while ((txp++)->flags & XEN_NETTXF_more_data);
	return frags;
}
890
/*
 * Allocate a page to back pending request @pending_idx, tag it via
 * set_page_ext() and record it in mmap_pages.  Returns NULL on
 * allocation failure.  @skb is currently unused.
 */
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
					 struct sk_buff *skb,
					 unsigned long pending_idx)
{
	struct page *page;
	page = alloc_page(GFP_KERNEL|__GFP_COLD);
	if (!page)
		return NULL;
	set_page_ext(page, netbk, pending_idx);
	netbk->mmap_pages[pending_idx] = page;
	return page;
}
903
904static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
905 struct xenvif *vif,
906 struct sk_buff *skb,
907 struct xen_netif_tx_request *txp,
908 struct gnttab_copy *gop)
909{
910 struct skb_shared_info *shinfo = skb_shinfo(skb);
911 skb_frag_t *frags = shinfo->frags;
912 unsigned long pending_idx = *((u16 *)skb->data);
913 int i, start;
914
915 /* Skip first skb fragment if it is on same page as header fragment. */
916 start = ((unsigned long)shinfo->frags[0].page == pending_idx);
917
918 for (i = start; i < shinfo->nr_frags; i++, txp++) {
919 struct page *page;
920 pending_ring_idx_t index;
921 struct pending_tx_info *pending_tx_info =
922 netbk->pending_tx_info;
923
924 index = pending_index(netbk->pending_cons++);
925 pending_idx = netbk->pending_ring[index];
926 page = xen_netbk_alloc_page(netbk, skb, pending_idx);
927 if (!page)
928 return NULL;
929
930 netbk->mmap_pages[pending_idx] = page;
931
932 gop->source.u.ref = txp->gref;
933 gop->source.domid = vif->domid;
934 gop->source.offset = txp->offset;
935
936 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
937 gop->dest.domid = DOMID_SELF;
938 gop->dest.offset = txp->offset;
939
940 gop->len = txp->size;
941 gop->flags = GNTCOPY_source_gref;
942
943 gop++;
944
945 memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
946 xenvif_get(vif);
947 pending_tx_info[pending_idx].vif = vif;
948 frags[i].page = (void *)pending_idx;
949 }
950
951 return gop;
952}
953
954static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
955 struct sk_buff *skb,
956 struct gnttab_copy **gopp)
957{
958 struct gnttab_copy *gop = *gopp;
959 int pending_idx = *((u16 *)skb->data);
960 struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
961 struct xenvif *vif = pending_tx_info[pending_idx].vif;
962 struct xen_netif_tx_request *txp;
963 struct skb_shared_info *shinfo = skb_shinfo(skb);
964 int nr_frags = shinfo->nr_frags;
965 int i, err, start;
966
967 /* Check status of header. */
968 err = gop->status;
969 if (unlikely(err)) {
970 pending_ring_idx_t index;
971 index = pending_index(netbk->pending_prod++);
972 txp = &pending_tx_info[pending_idx].req;
973 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
974 netbk->pending_ring[index] = pending_idx;
975 xenvif_put(vif);
976 }
977
978 /* Skip first skb fragment if it is on same page as header fragment. */
979 start = ((unsigned long)shinfo->frags[0].page == pending_idx);
980
981 for (i = start; i < nr_frags; i++) {
982 int j, newerr;
983 pending_ring_idx_t index;
984
985 pending_idx = (unsigned long)shinfo->frags[i].page;
986
987 /* Check error status: if okay then remember grant handle. */
988 newerr = (++gop)->status;
989 if (likely(!newerr)) {
990 /* Had a previous error? Invalidate this fragment. */
991 if (unlikely(err))
992 xen_netbk_idx_release(netbk, pending_idx);
993 continue;
994 }
995
996 /* Error on this fragment: respond to client with an error. */
997 txp = &netbk->pending_tx_info[pending_idx].req;
998 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
999 index = pending_index(netbk->pending_prod++);
1000 netbk->pending_ring[index] = pending_idx;
1001 xenvif_put(vif);
1002
1003 /* Not the first error? Preceding frags already invalidated. */
1004 if (err)
1005 continue;
1006
1007 /* First error: invalidate header and preceding fragments. */
1008 pending_idx = *((u16 *)skb->data);
1009 xen_netbk_idx_release(netbk, pending_idx);
1010 for (j = start; j < i; j++) {
1011 pending_idx = (unsigned long)shinfo->frags[i].page;
1012 xen_netbk_idx_release(netbk, pending_idx);
1013 }
1014
1015 /* Remember the error: invalidate all subsequent fragments. */
1016 err = newerr;
1017 }
1018
1019 *gopp = gop + 1;
1020 return err;
1021}
1022
/*
 * Rewrite each frag of a completed TX skb from the pending-index
 * cookie stored by xen_netbk_get_requests() to its real backing page,
 * and account the frag sizes to the skb.
 */
static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		unsigned long pending_idx;

		/* frag->page currently holds the pending index, not a page. */
		pending_idx = (unsigned long)frag->page;

		txp = &netbk->pending_tx_info[pending_idx].req;
		frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
		frag->size = txp->size;
		frag->page_offset = txp->offset;

		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xen_netbk_idx_release */
		get_page(netbk->mmap_pages[pending_idx]);
		xen_netbk_idx_release(netbk, pending_idx);
	}
}
1050
/*
 * Consume the chain of extra-info segments following a TX request,
 * storing each by type in @extras (a later entry of the same type
 * overwrites an earlier one).  Returns the remaining work_to_do
 * budget, or a negative errno on a truncated or malformed chain.
 */
static int xen_netbk_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_dbg(vif->dev, "Missing extra info\n");
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			/* Still consume the bogus slot before bailing out. */
			vif->tx.req_cons = ++cons;
			netdev_dbg(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
1080
1081static int netbk_set_skb_gso(struct xenvif *vif,
1082 struct sk_buff *skb,
1083 struct xen_netif_extra_info *gso)
1084{
1085 if (!gso->u.gso.size) {
1086 netdev_dbg(vif->dev, "GSO size must not be zero.\n");
1087 return -EINVAL;
1088 }
1089
1090 /* Currently only TCPv4 S.O. is supported. */
1091 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1092 netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1093 return -EINVAL;
1094 }
1095
1096 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1097 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1098
1099 /* Header must be checked, and gso_segs computed. */
1100 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1101 skb_shinfo(skb)->gso_segs = 0;
1102
1103 return 0;
1104}
1105
/*
 * Fix up the checksum metadata of a guest-supplied packet so the rest
 * of the stack can handle it.  Only IPv4 TCP/UDP are supported; other
 * protocols or short/garbled headers yield -EPROTO.
 */
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	/* Locate the transport header via the IP header length field. */
	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	/* The checksum field itself must lie within the buffer. */
	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
1175
/*
 * tx_credit_exceeded - credit-based rate limiting for guest TX.
 * @vif: interface being scheduled
 * @size: size in bytes of the request about to be processed
 *
 * Returns true if processing must be deferred: either a replenish
 * timer is already pending, or the request exceeds the remaining
 * credit (in which case a timer is armed to retry once the credit
 * period elapses).
 */
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	unsigned long now = jiffies;
	/* Earliest time at which the credit may next be replenished. */
	unsigned long next_credit =
		vif->credit_timeout.expires +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq(now, next_credit)) {
		vif->credit_timeout.expires = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);

		return true;
	}

	return false;
}
1207
/*
 * xen_netbk_tx_build_gops - drain frontend TX rings into grant-copy ops.
 * @netbk: the netback group to service
 *
 * Pulls transmit requests from each scheduled vif, validates them,
 * allocates an skb and a destination page per packet, and fills
 * netbk->tx_copy_ops with the grant-copy operations needed to pull the
 * packet data out of the guests. Stops when the pending-request pool or
 * the copy-op array is exhausted, or no vif has work.
 *
 * Returns the number of grant-copy operations built.
 */
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
		!list_empty(&netbk->net_schedule_list)) {
		struct xenvif *vif;
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		/* Get a netif from the list with work to do. */
		vif = poll_net_schedule_list(netbk);
		if (!vif)
			continue;

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
		if (!work_to_do) {
			xenvif_put(vif);
			continue;
		}

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size)) {
			xenvif_put(vif);
			continue;
		}

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		/* Pull in any extra-info segments (e.g. GSO metadata). */
		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xen_netbk_get_extras(vif, extras,
							  work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0)) {
				netbk_tx_err(vif, &txreq, idx);
				continue;
			}
		}

		/* Validate and gather the follow-on fragment requests. */
		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0)) {
			netbk_tx_err(vif, &txreq, idx - ret);
			continue;
		}
		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_dbg(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		index = pending_index(netbk->pending_cons);
		pending_idx = netbk->pending_ring[index];

		/* Linear area holds at most PKT_PROT_LEN bytes; the rest
		 * of the first request becomes a fragment. */
		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < MAX_SKB_FRAGS) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (netbk_set_skb_gso(vif, skb, gso)) {
				kfree_skb(skb);
				netbk_tx_err(vif, &txreq, idx);
				continue;
			}
		}

		/* XXX could copy straight to head */
		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
		if (!page) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		netbk->mmap_pages[pending_idx] = page;

		/* Build the grant copy: guest granted page -> local page. */
		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&netbk->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		netbk->pending_tx_info[pending_idx].vif = vif;
		/* Stash the pending index in the skb head; read back in
		 * xen_netbk_tx_submit() after the copies complete. */
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			skb_shinfo(skb)->frags[0].page =
				(void *)(unsigned long)pending_idx;
		} else {
			/* Discriminate from any valid pending_idx value. */
			skb_shinfo(skb)->frags[0].page = (void *)~0UL;
		}

		__skb_queue_tail(&netbk->tx_queue, skb);

		netbk->pending_cons++;

		/* Build the copy ops for the remaining fragments. */
		request_gop = xen_netbk_get_requests(netbk, vif,
						     skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}
		gop = request_gop;

		vif->tx.req_cons = idx;
		xen_netbk_check_rx_xenvif(vif);

		if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
			break;
	}

	return gop - netbk->tx_copy_ops;
}
1381
/*
 * xen_netbk_tx_submit - finish packets whose grant copies completed.
 * @netbk: the netback group being serviced
 *
 * Runs after the batched GNTTABOP_copy hypercall issued by
 * xen_netbk_tx_action(). Walks the queue filled by
 * xen_netbk_tx_build_gops(), checks each copy result, assembles the
 * final skb (linear data, frags, checksum state) and hands it to the
 * network stack.
 */
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		struct xenvif *vif;
		u16 pending_idx;
		unsigned data_len;

		/* The pending index was stashed in the skb head by
		 * xen_netbk_tx_build_gops(). */
		pending_idx = *((u16 *)skb->data);
		vif = netbk->pending_tx_info[pending_idx].vif;
		txp = &netbk->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		/* Copy the linear part out of the grant-copy page. */
		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xen_netbk_idx_release(netbk, pending_idx);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xen_netbk_fill_frags(netbk, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		/* Guest TX is host RX as far as the vif stats go. */
		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		xenvif_receive_skb(vif, skb);
	}
}
1451
1452/* Called after netfront has transmitted */
1453static void xen_netbk_tx_action(struct xen_netbk *netbk)
1454{
1455 unsigned nr_gops;
1456 int ret;
1457
1458 nr_gops = xen_netbk_tx_build_gops(netbk);
1459
1460 if (nr_gops == 0)
1461 return;
1462 ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
1463 netbk->tx_copy_ops, nr_gops);
1464 BUG_ON(ret);
1465
1466 xen_netbk_tx_submit(netbk);
1467
1468}
1469
/*
 * xen_netbk_idx_release - complete a pending transmit request.
 * @netbk: owning netback group
 * @pending_idx: index into pending_tx_info/mmap_pages
 *
 * Sends the OK response to the frontend, returns the slot to the
 * pending ring and releases the grant-copy destination page.
 * Idempotent: a slot whose page has already been released is skipped.
 */
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
{
	struct xenvif *vif;
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;

	/* Already complete? */
	if (netbk->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &netbk->pending_tx_info[pending_idx];

	vif = pending_tx_info->vif;

	/* Ack the original request towards the frontend. */
	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);

	/* Return the slot to the free pool. */
	index = pending_index(netbk->pending_prod++);
	netbk->pending_ring[index] = pending_idx;

	xenvif_put(vif);

	/* Drop the page; the NULL entry also marks the slot complete
	 * for the check at the top of this function. */
	netbk->mmap_pages[pending_idx]->mapping = 0;
	put_page(netbk->mmap_pages[pending_idx]);
	netbk->mmap_pages[pending_idx] = NULL;
}
1495
/*
 * make_tx_response - queue a TX completion on the shared ring.
 * @vif: interface owning the ring
 * @txp: the request being answered (its id is echoed back)
 * @st: status code placed in the response
 *
 * If the request carried extra info, a NULL response is emitted for the
 * extra slot too, keeping requests and responses in lock-step. The
 * frontend is notified over the event channel if required.
 */
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id = txp->id;
	resp->status = st;

	/* Consume the extra-info slot with a dummy response. */
	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->irq);
}
1516
1517static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1518 u16 id,
1519 s8 st,
1520 u16 offset,
1521 u16 size,
1522 u16 flags)
1523{
1524 RING_IDX i = vif->rx.rsp_prod_pvt;
1525 struct xen_netif_rx_response *resp;
1526
1527 resp = RING_GET_RESPONSE(&vif->rx, i);
1528 resp->offset = offset;
1529 resp->flags = flags;
1530 resp->id = id;
1531 resp->status = (s16)size;
1532 if (st < 0)
1533 resp->status = (s16)st;
1534
1535 vif->rx.rsp_prod_pvt = ++i;
1536
1537 return resp;
1538}
1539
1540static inline int rx_work_todo(struct xen_netbk *netbk)
1541{
1542 return !skb_queue_empty(&netbk->rx_queue);
1543}
1544
1545static inline int tx_work_todo(struct xen_netbk *netbk)
1546{
1547
1548 if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
1549 !list_empty(&netbk->net_schedule_list))
1550 return 1;
1551
1552 return 0;
1553}
1554
1555static int xen_netbk_kthread(void *data)
1556{
1557 struct xen_netbk *netbk = data;
1558 while (!kthread_should_stop()) {
1559 wait_event_interruptible(netbk->wq,
1560 rx_work_todo(netbk) ||
1561 tx_work_todo(netbk) ||
1562 kthread_should_stop());
1563 cond_resched();
1564
1565 if (kthread_should_stop())
1566 break;
1567
1568 if (rx_work_todo(netbk))
1569 xen_netbk_rx_action(netbk);
1570
1571 if (tx_work_todo(netbk))
1572 xen_netbk_tx_action(netbk);
1573 }
1574
1575 return 0;
1576}
1577
/*
 * xen_netbk_unmap_frontend_rings - tear down shared ring mappings.
 * @vif: the interface whose rings should be unmapped
 *
 * Unmaps whichever of the TX/RX shared rings are currently mapped and
 * frees the backing vm areas. Tolerates a partially set-up vif, so it
 * also serves as the error path of xen_netbk_map_frontend_rings().
 */
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	struct gnttab_unmap_grant_ref op;

	if (vif->tx.sring) {
		gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
				    GNTMAP_host_map, vif->tx_shmem_handle);

		/* Failure to unmap a grant we hold is unrecoverable. */
		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
			BUG();
	}

	if (vif->rx.sring) {
		gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
				    GNTMAP_host_map, vif->rx_shmem_handle);

		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
			BUG();
	}
	if (vif->rx_comms_area)
		free_vm_area(vif->rx_comms_area);
	if (vif->tx_comms_area)
		free_vm_area(vif->tx_comms_area);
}
1602
/*
 * xen_netbk_map_frontend_rings - map the frontend's shared ring pages.
 * @vif: interface being connected
 * @tx_ring_ref: grant reference of the TX shared ring page
 * @rx_ring_ref: grant reference of the RX shared ring page
 *
 * Allocates a vm area for each ring, maps the granted pages into it
 * and initialises the backend ring state. On any failure the partial
 * setup is undone via xen_netbk_unmap_frontend_rings().
 *
 * Returns 0 on success, -ENOMEM, or the grant operation status code.
 */
int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	struct gnttab_map_grant_ref op;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (vif->tx_comms_area == NULL)
		goto err;

	vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (vif->rx_comms_area == NULL)
		goto err;

	/* Map the TX ring page granted by the frontend. */
	gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
			  GNTMAP_host_map, tx_ring_ref, vif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		netdev_warn(vif->dev,
			    "failed to map tx ring. err=%d status=%d\n",
			    err, op.status);
		err = op.status;
		goto err;
	}

	vif->tx_shmem_ref = tx_ring_ref;
	vif->tx_shmem_handle = op.handle;

	txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	/* And the same again for the RX ring. */
	gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
			  GNTMAP_host_map, rx_ring_ref, vif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		netdev_warn(vif->dev,
			    "failed to map rx ring. err=%d status=%d\n",
			    err, op.status);
		err = op.status;
		goto err;
	}

	vif->rx_shmem_ref = rx_ring_ref;
	vif->rx_shmem_handle = op.handle;
	vif->rx_req_cons_peek = 0;

	rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}
1668
1669static int __init netback_init(void)
1670{
1671 int i;
1672 int rc = 0;
1673 int group;
1674
1675 if (!xen_pv_domain())
1676 return -ENODEV;
1677
1678 xen_netbk_group_nr = num_online_cpus();
1679 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1680 if (!xen_netbk) {
1681 printk(KERN_ALERT "%s: out of memory\n", __func__);
1682 return -ENOMEM;
1683 }
1684
1685 for (group = 0; group < xen_netbk_group_nr; group++) {
1686 struct xen_netbk *netbk = &xen_netbk[group];
1687 skb_queue_head_init(&netbk->rx_queue);
1688 skb_queue_head_init(&netbk->tx_queue);
1689
1690 init_timer(&netbk->net_timer);
1691 netbk->net_timer.data = (unsigned long)netbk;
1692 netbk->net_timer.function = xen_netbk_alarm;
1693
1694 netbk->pending_cons = 0;
1695 netbk->pending_prod = MAX_PENDING_REQS;
1696 for (i = 0; i < MAX_PENDING_REQS; i++)
1697 netbk->pending_ring[i] = i;
1698
1699 init_waitqueue_head(&netbk->wq);
1700 netbk->task = kthread_create(xen_netbk_kthread,
1701 (void *)netbk,
1702 "netback/%u", group);
1703
1704 if (IS_ERR(netbk->task)) {
1705 printk(KERN_ALERT "kthread_run() fails at netback\n");
1706 del_timer(&netbk->net_timer);
1707 rc = PTR_ERR(netbk->task);
1708 goto failed_init;
1709 }
1710
1711 kthread_bind(netbk->task, group);
1712
1713 INIT_LIST_HEAD(&netbk->net_schedule_list);
1714
1715 spin_lock_init(&netbk->net_schedule_list_lock);
1716
1717 atomic_set(&netbk->netfront_count, 0);
1718
1719 wake_up_process(netbk->task);
1720 }
1721
1722 rc = xenvif_xenbus_init();
1723 if (rc)
1724 goto failed_init;
1725
1726 return 0;
1727
1728failed_init:
1729 while (--group >= 0) {
1730 struct xen_netbk *netbk = &xen_netbk[group];
1731 for (i = 0; i < MAX_PENDING_REQS; i++) {
1732 if (netbk->mmap_pages[i])
1733 __free_page(netbk->mmap_pages[i]);
1734 }
1735 del_timer(&netbk->net_timer);
1736 kthread_stop(netbk->task);
1737 }
1738 vfree(xen_netbk);
1739 return rc;
1740
1741}
1742
1743module_init(netback_init);
1744
1745MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
new file mode 100644
index 000000000000..22b8c3505991
--- /dev/null
+++ b/drivers/net/xen-netback/xenbus.c
@@ -0,0 +1,490 @@
1/*
2 * Xenbus code for netif backend
3 *
4 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
5 * Copyright (C) 2005 XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20*/
21
22#include "common.h"
23
24struct backend_info {
25 struct xenbus_device *dev;
26 struct xenvif *vif;
27 enum xenbus_state frontend_state;
28 struct xenbus_watch hotplug_status_watch;
29 int have_hotplug_status_watch:1;
30};
31
32static int connect_rings(struct backend_info *);
33static void connect(struct backend_info *);
34static void backend_create_xenvif(struct backend_info *be);
35static void unregister_hotplug_status_watch(struct backend_info *be);
36
/*
 * netback_remove - xenbus device removal callback.
 *
 * Drops the hotplug-status watch, takes the vif offline (uevent,
 * xenstore cleanup, disconnect) if one exists, and frees the
 * backend_info allocated in netback_probe().
 */
static int netback_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	unregister_hotplug_status_watch(be);
	if (be->vif) {
		/* Tell userspace (hotplug scripts) the vif is going away. */
		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
		xenvif_disconnect(be->vif);
		be->vif = NULL;
	}
	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}
52
53
/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and switch to InitWait.
 *
 * Advertises the backend's feature set in xenstore (inside a
 * transaction, retried on -EAGAIN) and kicks off creation of the vif.
 * Returns 0 on success or a negative errno after cleaning up via
 * netback_remove().
 */
static int netback_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	int sg;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}

	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	sg = 1;	/* scatter-gather (and thus GSO) is supported */

	do {
		err = xenbus_transaction_start(&xbt);
		if (err) {
			xenbus_dev_fatal(dev, err, "starting transaction");
			goto fail;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
		if (err) {
			message = "writing feature-sg";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv4";
			goto abort_transaction;
		}

		/* We support rx-copy path. */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-copy", "%d", 1);
		if (err) {
			message = "writing feature-rx-copy";
			goto abort_transaction;
		}

		/*
		 * We don't support rx-flip path (except old guests who don't
		 * grok this feature flag).
		 */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-flip", "%d", 0);
		if (err) {
			message = "writing feature-rx-flip";
			goto abort_transaction;
		}

		err = xenbus_transaction_end(xbt, 0);
	} while (err == -EAGAIN);	/* retry on transaction conflict */

	if (err) {
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto fail;
	}

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	/* This kicks hotplug scripts, so do it immediately. */
	backend_create_xenvif(be);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
fail:
	pr_debug("failed");
	netback_remove(dev);
	return err;
}
142
143
144/*
145 * Handle the creation of the hotplug script environment. We add the script
146 * and vif variables to the environment, for the benefit of the vif-* hotplug
147 * scripts.
148 */
149static int netback_uevent(struct xenbus_device *xdev,
150 struct kobj_uevent_env *env)
151{
152 struct backend_info *be = dev_get_drvdata(&xdev->dev);
153 char *val;
154
155 val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
156 if (IS_ERR(val)) {
157 int err = PTR_ERR(val);
158 xenbus_dev_fatal(xdev, err, "reading script");
159 return err;
160 } else {
161 if (add_uevent_var(env, "script=%s", val)) {
162 kfree(val);
163 return -ENOMEM;
164 }
165 kfree(val);
166 }
167
168 if (!be || !be->vif)
169 return 0;
170
171 return add_uevent_var(env, "vif=%s", be->vif->dev->name);
172}
173
174
175static void backend_create_xenvif(struct backend_info *be)
176{
177 int err;
178 long handle;
179 struct xenbus_device *dev = be->dev;
180
181 if (be->vif != NULL)
182 return;
183
184 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
185 if (err != 1) {
186 xenbus_dev_fatal(dev, err, "reading handle");
187 return;
188 }
189
190 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
191 if (IS_ERR(be->vif)) {
192 err = PTR_ERR(be->vif);
193 be->vif = NULL;
194 xenbus_dev_fatal(dev, err, "creating interface");
195 return;
196 }
197
198 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
199}
200
201
202static void disconnect_backend(struct xenbus_device *dev)
203{
204 struct backend_info *be = dev_get_drvdata(&dev->dev);
205
206 if (be->vif) {
207 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
208 xenvif_disconnect(be->vif);
209 be->vif = NULL;
210 }
211}
212
/**
 * Callback received when the frontend's state changes.
 *
 * Drives the backend half of the xenbus state machine: creates and
 * connects the vif when the frontend reaches Connected, tears it down
 * on Closing/Closed, and unregisters the device once the toolstack has
 * marked it offline.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("frontend state %s", xenbus_strstate(frontend_state));

	be->frontend_state = frontend_state;

	switch (frontend_state) {
	case XenbusStateInitialising:
		/* A frontend re-initialising after a close: get ready
		 * to negotiate again. */
		if (dev->state == XenbusStateClosed) {
			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
			       __func__, dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;
		backend_create_xenvif(be);
		if (be->vif)
			connect(be);
		break;

	case XenbusStateClosing:
		if (be->vif)
			kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		disconnect_backend(dev);
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
267
268
269static void xen_net_read_rate(struct xenbus_device *dev,
270 unsigned long *bytes, unsigned long *usec)
271{
272 char *s, *e;
273 unsigned long b, u;
274 char *ratestr;
275
276 /* Default to unlimited bandwidth. */
277 *bytes = ~0UL;
278 *usec = 0;
279
280 ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
281 if (IS_ERR(ratestr))
282 return;
283
284 s = ratestr;
285 b = simple_strtoul(s, &e, 10);
286 if ((s == e) || (*e != ','))
287 goto fail;
288
289 s = e + 1;
290 u = simple_strtoul(s, &e, 10);
291 if ((s == e) || (*e != '\0'))
292 goto fail;
293
294 *bytes = b;
295 *usec = u;
296
297 kfree(ratestr);
298 return;
299
300 fail:
301 pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
302 kfree(ratestr);
303}
304
305static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
306{
307 char *s, *e, *macstr;
308 int i;
309
310 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
311 if (IS_ERR(macstr))
312 return PTR_ERR(macstr);
313
314 for (i = 0; i < ETH_ALEN; i++) {
315 mac[i] = simple_strtoul(s, &e, 16);
316 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
317 kfree(macstr);
318 return -ENOENT;
319 }
320 s = e+1;
321 }
322
323 kfree(macstr);
324 return 0;
325}
326
327static void unregister_hotplug_status_watch(struct backend_info *be)
328{
329 if (be->have_hotplug_status_watch) {
330 unregister_xenbus_watch(&be->hotplug_status_watch);
331 kfree(be->hotplug_status_watch.node);
332 }
333 be->have_hotplug_status_watch = 0;
334}
335
336static void hotplug_status_changed(struct xenbus_watch *watch,
337 const char **vec,
338 unsigned int vec_size)
339{
340 struct backend_info *be = container_of(watch,
341 struct backend_info,
342 hotplug_status_watch);
343 char *str;
344 unsigned int len;
345
346 str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
347 if (IS_ERR(str))
348 return;
349 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
350 xenbus_switch_state(be->dev, XenbusStateConnected);
351 /* Not interested in this watch anymore. */
352 unregister_hotplug_status_watch(be);
353 }
354 kfree(str);
355}
356
/*
 * connect - bring the vif online once the frontend is Connected.
 *
 * Maps the shared rings, reads the frontend MAC address and optional
 * rate limit, then waits (via a xenstore watch) for the hotplug script
 * to report "connected" before switching the backend to Connected.
 */
static void connect(struct backend_info *be)
{
	int err;
	struct xenbus_device *dev = be->dev;

	err = connect_rings(be);
	if (err)
		return;

	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		return;
	}

	/* Credit-based rate limiting; defaults to unlimited. */
	xen_net_read_rate(dev, &be->vif->credit_bytes,
			  &be->vif->credit_usec);
	be->vif->remaining_credit = be->vif->credit_bytes;

	unregister_hotplug_status_watch(be);
	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
				   hotplug_status_changed,
				   "%s/%s", dev->nodename, "hotplug-status");
	if (err) {
		/* Switch now, since we can't do a watch. */
		xenbus_switch_state(dev, XenbusStateConnected);
	} else {
		be->have_hotplug_status_watch = 1;
	}

	netif_wake_queue(be->vif->dev);
}
389
390
/*
 * connect_rings - read the frontend's ring/feature configuration and
 * map the shared rings.
 *
 * Gathers the grant references and event channel published by the
 * frontend, negotiates optional features (rx-notify, sg, gso, checksum
 * offload), then maps the shared frames via xenvif_connect().
 * Returns 0 on success or a negative errno (also reported through
 * xenbus_dev_fatal where appropriate).
 */
static int connect_rings(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int evtchn, rx_copy;
	int err;
	int val;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref,
			    "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

	/* A missing request-rx-copy key means an old page-flipping
	 * frontend; treat it as rx_copy == 0. */
	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	/* This backend only implements the rx-copy receive path. */
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (vif->dev->tx_queue_len != 0) {
		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-rx-notify", "%d", &val) < 0)
			val = 0;
		if (val)
			vif->can_queue = 1;
		else
			/* Must be non-zero for pfifo_fast to work. */
			vif->dev->tx_queue_len = 1;
	}

	/* Each feature below defaults to "absent" (0) when unreadable. */
	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
			 "%d", &val) < 0)
		val = 0;
	vif->can_sg = !!val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
			 "%d", &val) < 0)
		val = 0;
	vif->gso = !!val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
			 "%d", &val) < 0)
		val = 0;
	vif->gso_prefix = !!val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->csum = !val;	/* note the inverted sense of the key */

	/* Map the shared frame, irq etc. */
	err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port %u",
				 tx_ring_ref, rx_ring_ref, evtchn);
		return err;
	}
	return 0;
}
466
467
468/* ** Driver Registration ** */
469
470
/* xenbus device types handled by this driver. */
static const struct xenbus_device_id netback_ids[] = {
	{ "vif" },
	{ "" }
};
475
476
/* xenbus driver glue: probe/remove/uevent plus frontend state changes. */
static struct xenbus_driver netback = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netback_ids,
	.probe = netback_probe,
	.remove = netback_remove,
	.uevent = netback_uevent,
	.otherend_changed = frontend_changed,
};
486
/* Register the netback xenbus driver; called from netback_init(). */
int xenvif_xenbus_init(void)
{
	return xenbus_register_backend(&netback);
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index da1f12120346..5c8d9c385be0 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -122,7 +122,7 @@ struct netfront_info {
122 struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 122 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
123 123
124 /* Statistics */ 124 /* Statistics */
125 int rx_gso_checksum_fixup; 125 unsigned long rx_gso_checksum_fixup;
126}; 126};
127 127
128struct netfront_rx_info { 128struct netfront_rx_info {
@@ -359,7 +359,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
359 struct xen_netif_tx_response *txrsp; 359 struct xen_netif_tx_response *txrsp;
360 360
361 txrsp = RING_GET_RESPONSE(&np->tx, cons); 361 txrsp = RING_GET_RESPONSE(&np->tx, cons);
362 if (txrsp->status == NETIF_RSP_NULL) 362 if (txrsp->status == XEN_NETIF_RSP_NULL)
363 continue; 363 continue;
364 364
365 id = txrsp->id; 365 id = txrsp->id;
@@ -416,7 +416,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
416 larger than a page), split it it into page-sized chunks. */ 416 larger than a page), split it it into page-sized chunks. */
417 while (len > PAGE_SIZE - offset) { 417 while (len > PAGE_SIZE - offset) {
418 tx->size = PAGE_SIZE - offset; 418 tx->size = PAGE_SIZE - offset;
419 tx->flags |= NETTXF_more_data; 419 tx->flags |= XEN_NETTXF_more_data;
420 len -= tx->size; 420 len -= tx->size;
421 data += tx->size; 421 data += tx->size;
422 offset = 0; 422 offset = 0;
@@ -442,7 +442,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
442 for (i = 0; i < frags; i++) { 442 for (i = 0; i < frags; i++) {
443 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 443 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
444 444
445 tx->flags |= NETTXF_more_data; 445 tx->flags |= XEN_NETTXF_more_data;
446 446
447 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); 447 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
448 np->tx_skbs[id].skb = skb_get(skb); 448 np->tx_skbs[id].skb = skb_get(skb);
@@ -517,10 +517,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
517 tx->flags = 0; 517 tx->flags = 0;
518 if (skb->ip_summed == CHECKSUM_PARTIAL) 518 if (skb->ip_summed == CHECKSUM_PARTIAL)
519 /* local packet? */ 519 /* local packet? */
520 tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; 520 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
521 else if (skb->ip_summed == CHECKSUM_UNNECESSARY) 521 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
522 /* remote but checksummed. */ 522 /* remote but checksummed. */
523 tx->flags |= NETTXF_data_validated; 523 tx->flags |= XEN_NETTXF_data_validated;
524 524
525 if (skb_shinfo(skb)->gso_size) { 525 if (skb_shinfo(skb)->gso_size) {
526 struct xen_netif_extra_info *gso; 526 struct xen_netif_extra_info *gso;
@@ -531,7 +531,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
531 if (extra) 531 if (extra)
532 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; 532 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
533 else 533 else
534 tx->flags |= NETTXF_extra_info; 534 tx->flags |= XEN_NETTXF_extra_info;
535 535
536 gso->u.gso.size = skb_shinfo(skb)->gso_size; 536 gso->u.gso.size = skb_shinfo(skb)->gso_size;
537 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; 537 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
@@ -651,7 +651,7 @@ static int xennet_get_responses(struct netfront_info *np,
651 int err = 0; 651 int err = 0;
652 unsigned long ret; 652 unsigned long ret;
653 653
654 if (rx->flags & NETRXF_extra_info) { 654 if (rx->flags & XEN_NETRXF_extra_info) {
655 err = xennet_get_extras(np, extras, rp); 655 err = xennet_get_extras(np, extras, rp);
656 cons = np->rx.rsp_cons; 656 cons = np->rx.rsp_cons;
657 } 657 }
@@ -688,7 +688,7 @@ static int xennet_get_responses(struct netfront_info *np,
688 __skb_queue_tail(list, skb); 688 __skb_queue_tail(list, skb);
689 689
690next: 690next:
691 if (!(rx->flags & NETRXF_more_data)) 691 if (!(rx->flags & XEN_NETRXF_more_data))
692 break; 692 break;
693 693
694 if (cons + frags == rp) { 694 if (cons + frags == rp) {
@@ -983,9 +983,9 @@ err:
983 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); 983 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
984 skb->len += skb->data_len; 984 skb->len += skb->data_len;
985 985
986 if (rx->flags & NETRXF_csum_blank) 986 if (rx->flags & XEN_NETRXF_csum_blank)
987 skb->ip_summed = CHECKSUM_PARTIAL; 987 skb->ip_summed = CHECKSUM_PARTIAL;
988 else if (rx->flags & NETRXF_data_validated) 988 else if (rx->flags & XEN_NETRXF_data_validated)
989 skb->ip_summed = CHECKSUM_UNNECESSARY; 989 skb->ip_summed = CHECKSUM_UNNECESSARY;
990 990
991 __skb_queue_tail(&rxq, skb); 991 __skb_queue_tail(&rxq, skb);
@@ -1692,7 +1692,7 @@ static void xennet_get_ethtool_stats(struct net_device *dev,
1692 int i; 1692 int i;
1693 1693
1694 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 1694 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1695 data[i] = *(int *)(np + xennet_stats[i].offset); 1695 data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1696} 1696}
1697 1697
1698static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 1698static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f47a714538db..af3f7b095647 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -225,7 +225,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
225/*****************************************************************************/ 225/*****************************************************************************/
226#define QETH_MAX_QUEUES 4 226#define QETH_MAX_QUEUES 4
227#define QETH_IN_BUF_SIZE_DEFAULT 65536 227#define QETH_IN_BUF_SIZE_DEFAULT 65536
228#define QETH_IN_BUF_COUNT_DEFAULT 16 228#define QETH_IN_BUF_COUNT_DEFAULT 64
229#define QETH_IN_BUF_COUNT_HSDEFAULT 128
229#define QETH_IN_BUF_COUNT_MIN 8 230#define QETH_IN_BUF_COUNT_MIN 8
230#define QETH_IN_BUF_COUNT_MAX 128 231#define QETH_IN_BUF_COUNT_MAX 128
231#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) 232#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
@@ -741,7 +742,6 @@ struct qeth_card {
741 /* QDIO buffer handling */ 742 /* QDIO buffer handling */
742 struct qeth_qdio_info qdio; 743 struct qeth_qdio_info qdio;
743 struct qeth_perf_stats perf_stats; 744 struct qeth_perf_stats perf_stats;
744 int use_hard_stop;
745 int read_or_write_problem; 745 int read_or_write_problem;
746 struct qeth_osn_info osn_info; 746 struct qeth_osn_info osn_info;
747 struct qeth_discipline discipline; 747 struct qeth_discipline discipline;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 019ae58ab913..25eef304bd47 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -302,12 +302,15 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
302 int com = cmd->hdr.command; 302 int com = cmd->hdr.command;
303 ipa_name = qeth_get_ipa_cmd_name(com); 303 ipa_name = qeth_get_ipa_cmd_name(com);
304 if (rc) 304 if (rc)
305 QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n", 305 QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
306 ipa_name, com, QETH_CARD_IFNAME(card), 306 "x%X \"%s\"\n",
307 rc, qeth_get_ipa_msg(rc)); 307 ipa_name, com, dev_name(&card->gdev->dev),
308 QETH_CARD_IFNAME(card), rc,
309 qeth_get_ipa_msg(rc));
308 else 310 else
309 QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n", 311 QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
310 ipa_name, com, QETH_CARD_IFNAME(card)); 312 ipa_name, com, dev_name(&card->gdev->dev),
313 QETH_CARD_IFNAME(card));
311} 314}
312 315
313static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, 316static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -1023,7 +1026,10 @@ static void qeth_init_qdio_info(struct qeth_card *card)
1023 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 1026 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1024 /* inbound */ 1027 /* inbound */
1025 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; 1028 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1026 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; 1029 if (card->info.type == QETH_CARD_TYPE_IQD)
1030 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1031 else
1032 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1027 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; 1033 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1028 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); 1034 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1029 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); 1035 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
@@ -1083,7 +1089,6 @@ static int qeth_setup_card(struct qeth_card *card)
1083 card->data.state = CH_STATE_DOWN; 1089 card->data.state = CH_STATE_DOWN;
1084 card->state = CARD_STATE_DOWN; 1090 card->state = CARD_STATE_DOWN;
1085 card->lan_online = 0; 1091 card->lan_online = 0;
1086 card->use_hard_stop = 0;
1087 card->read_or_write_problem = 0; 1092 card->read_or_write_problem = 0;
1088 card->dev = NULL; 1093 card->dev = NULL;
1089 spin_lock_init(&card->vlanlock); 1094 spin_lock_init(&card->vlanlock);
@@ -1732,20 +1737,22 @@ int qeth_send_control_data(struct qeth_card *card, int len,
1732 }; 1737 };
1733 } 1738 }
1734 1739
1740 if (reply->rc == -EIO)
1741 goto error;
1735 rc = reply->rc; 1742 rc = reply->rc;
1736 qeth_put_reply(reply); 1743 qeth_put_reply(reply);
1737 return rc; 1744 return rc;
1738 1745
1739time_err: 1746time_err:
1747 reply->rc = -ETIME;
1740 spin_lock_irqsave(&reply->card->lock, flags); 1748 spin_lock_irqsave(&reply->card->lock, flags);
1741 list_del_init(&reply->list); 1749 list_del_init(&reply->list);
1742 spin_unlock_irqrestore(&reply->card->lock, flags); 1750 spin_unlock_irqrestore(&reply->card->lock, flags);
1743 reply->rc = -ETIME;
1744 atomic_inc(&reply->received); 1751 atomic_inc(&reply->received);
1752error:
1745 atomic_set(&card->write.irq_pending, 0); 1753 atomic_set(&card->write.irq_pending, 0);
1746 qeth_release_buffer(iob->channel, iob); 1754 qeth_release_buffer(iob->channel, iob);
1747 card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; 1755 card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
1748 wake_up(&reply->wait_q);
1749 rc = reply->rc; 1756 rc = reply->rc;
1750 qeth_put_reply(reply); 1757 qeth_put_reply(reply);
1751 return rc; 1758 return rc;
@@ -2490,45 +2497,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2490} 2497}
2491EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 2498EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2492 2499
2493static int qeth_send_startstoplan(struct qeth_card *card,
2494 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2495{
2496 int rc;
2497 struct qeth_cmd_buffer *iob;
2498
2499 iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
2500 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2501
2502 return rc;
2503}
2504
2505int qeth_send_startlan(struct qeth_card *card) 2500int qeth_send_startlan(struct qeth_card *card)
2506{ 2501{
2507 int rc; 2502 int rc;
2503 struct qeth_cmd_buffer *iob;
2508 2504
2509 QETH_DBF_TEXT(SETUP, 2, "strtlan"); 2505 QETH_DBF_TEXT(SETUP, 2, "strtlan");
2510 2506
2511 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0); 2507 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
2508 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2512 return rc; 2509 return rc;
2513} 2510}
2514EXPORT_SYMBOL_GPL(qeth_send_startlan); 2511EXPORT_SYMBOL_GPL(qeth_send_startlan);
2515 2512
2516int qeth_send_stoplan(struct qeth_card *card)
2517{
2518 int rc = 0;
2519
2520 /*
2521 * TODO: according to the IPA format document page 14,
2522 * TCP/IP (we!) never issue a STOPLAN
2523 * is this right ?!?
2524 */
2525 QETH_DBF_TEXT(SETUP, 2, "stoplan");
2526
2527 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
2528 return rc;
2529}
2530EXPORT_SYMBOL_GPL(qeth_send_stoplan);
2531
2532int qeth_default_setadapterparms_cb(struct qeth_card *card, 2513int qeth_default_setadapterparms_cb(struct qeth_card *card,
2533 struct qeth_reply *reply, unsigned long data) 2514 struct qeth_reply *reply, unsigned long data)
2534{ 2515{
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ada0fe782373..6fbaacb21943 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -202,17 +202,19 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
202 kfree(mc); 202 kfree(mc);
203} 203}
204 204
205static void qeth_l2_del_all_mc(struct qeth_card *card) 205static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
206{ 206{
207 struct qeth_mc_mac *mc, *tmp; 207 struct qeth_mc_mac *mc, *tmp;
208 208
209 spin_lock_bh(&card->mclock); 209 spin_lock_bh(&card->mclock);
210 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) { 210 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
211 if (mc->is_vmac) 211 if (del) {
212 qeth_l2_send_setdelmac(card, mc->mc_addr, 212 if (mc->is_vmac)
213 qeth_l2_send_setdelmac(card, mc->mc_addr,
213 IPA_CMD_DELVMAC, NULL); 214 IPA_CMD_DELVMAC, NULL);
214 else 215 else
215 qeth_l2_send_delgroupmac(card, mc->mc_addr); 216 qeth_l2_send_delgroupmac(card, mc->mc_addr);
217 }
216 list_del(&mc->list); 218 list_del(&mc->list);
217 kfree(mc); 219 kfree(mc);
218 } 220 }
@@ -288,18 +290,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
288 qeth_l2_send_setdelvlan_cb, NULL); 290 qeth_l2_send_setdelvlan_cb, NULL);
289} 291}
290 292
291static void qeth_l2_process_vlans(struct qeth_card *card, int clear) 293static void qeth_l2_process_vlans(struct qeth_card *card)
292{ 294{
293 struct qeth_vlan_vid *id; 295 struct qeth_vlan_vid *id;
294 QETH_CARD_TEXT(card, 3, "L2prcvln"); 296 QETH_CARD_TEXT(card, 3, "L2prcvln");
295 spin_lock_bh(&card->vlanlock); 297 spin_lock_bh(&card->vlanlock);
296 list_for_each_entry(id, &card->vid_list, list) { 298 list_for_each_entry(id, &card->vid_list, list) {
297 if (clear) 299 qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
298 qeth_l2_send_setdelvlan(card, id->vid,
299 IPA_CMD_DELVLAN);
300 else
301 qeth_l2_send_setdelvlan(card, id->vid,
302 IPA_CMD_SETVLAN);
303 } 300 }
304 spin_unlock_bh(&card->vlanlock); 301 spin_unlock_bh(&card->vlanlock);
305} 302}
@@ -379,19 +376,11 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
379 dev_close(card->dev); 376 dev_close(card->dev);
380 rtnl_unlock(); 377 rtnl_unlock();
381 } 378 }
382 if (!card->use_hard_stop || 379 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
383 recovery_mode) {
384 __u8 *mac = &card->dev->dev_addr[0];
385 rc = qeth_l2_send_delmac(card, mac);
386 QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
387 }
388 card->state = CARD_STATE_SOFTSETUP; 380 card->state = CARD_STATE_SOFTSETUP;
389 } 381 }
390 if (card->state == CARD_STATE_SOFTSETUP) { 382 if (card->state == CARD_STATE_SOFTSETUP) {
391 qeth_l2_process_vlans(card, 1); 383 qeth_l2_del_all_mc(card, 0);
392 if (!card->use_hard_stop ||
393 recovery_mode)
394 qeth_l2_del_all_mc(card);
395 qeth_clear_ipacmd_list(card); 384 qeth_clear_ipacmd_list(card);
396 card->state = CARD_STATE_HARDSETUP; 385 card->state = CARD_STATE_HARDSETUP;
397 } 386 }
@@ -405,7 +394,6 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
405 qeth_clear_cmd_buffers(&card->read); 394 qeth_clear_cmd_buffers(&card->read);
406 qeth_clear_cmd_buffers(&card->write); 395 qeth_clear_cmd_buffers(&card->write);
407 } 396 }
408 card->use_hard_stop = 0;
409 return rc; 397 return rc;
410} 398}
411 399
@@ -705,7 +693,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
705 if (qeth_threads_running(card, QETH_RECOVER_THREAD) && 693 if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
706 (card->state != CARD_STATE_UP)) 694 (card->state != CARD_STATE_UP))
707 return; 695 return;
708 qeth_l2_del_all_mc(card); 696 qeth_l2_del_all_mc(card, 1);
709 spin_lock_bh(&card->mclock); 697 spin_lock_bh(&card->mclock);
710 netdev_for_each_mc_addr(ha, dev) 698 netdev_for_each_mc_addr(ha, dev)
711 qeth_l2_add_mc(card, ha->addr, 0); 699 qeth_l2_add_mc(card, ha->addr, 0);
@@ -907,10 +895,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
907 qeth_set_allowed_threads(card, 0, 1); 895 qeth_set_allowed_threads(card, 0, 1);
908 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 896 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
909 897
910 if (cgdev->state == CCWGROUP_ONLINE) { 898 if (cgdev->state == CCWGROUP_ONLINE)
911 card->use_hard_stop = 1;
912 qeth_l2_set_offline(cgdev); 899 qeth_l2_set_offline(cgdev);
913 }
914 900
915 if (card->dev) { 901 if (card->dev) {
916 unregister_netdev(card->dev); 902 unregister_netdev(card->dev);
@@ -1040,7 +1026,7 @@ contin:
1040 1026
1041 if (card->info.type != QETH_CARD_TYPE_OSN && 1027 if (card->info.type != QETH_CARD_TYPE_OSN &&
1042 card->info.type != QETH_CARD_TYPE_OSM) 1028 card->info.type != QETH_CARD_TYPE_OSM)
1043 qeth_l2_process_vlans(card, 0); 1029 qeth_l2_process_vlans(card);
1044 1030
1045 netif_tx_disable(card->dev); 1031 netif_tx_disable(card->dev);
1046 1032
@@ -1076,7 +1062,6 @@ contin:
1076 return 0; 1062 return 0;
1077 1063
1078out_remove: 1064out_remove:
1079 card->use_hard_stop = 1;
1080 qeth_l2_stop_card(card, 0); 1065 qeth_l2_stop_card(card, 0);
1081 ccw_device_set_offline(CARD_DDEV(card)); 1066 ccw_device_set_offline(CARD_DDEV(card));
1082 ccw_device_set_offline(CARD_WDEV(card)); 1067 ccw_device_set_offline(CARD_WDEV(card));
@@ -1144,7 +1129,6 @@ static int qeth_l2_recover(void *ptr)
1144 QETH_CARD_TEXT(card, 2, "recover2"); 1129 QETH_CARD_TEXT(card, 2, "recover2");
1145 dev_warn(&card->gdev->dev, 1130 dev_warn(&card->gdev->dev,
1146 "A recovery process has been started for the device\n"); 1131 "A recovery process has been started for the device\n");
1147 card->use_hard_stop = 1;
1148 __qeth_l2_set_offline(card->gdev, 1); 1132 __qeth_l2_set_offline(card->gdev, 1);
1149 rc = __qeth_l2_set_online(card->gdev, 1); 1133 rc = __qeth_l2_set_online(card->gdev, 1);
1150 if (!rc) 1134 if (!rc)
@@ -1191,7 +1175,6 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
1191 if (gdev->state == CCWGROUP_OFFLINE) 1175 if (gdev->state == CCWGROUP_OFFLINE)
1192 return 0; 1176 return 0;
1193 if (card->state == CARD_STATE_UP) { 1177 if (card->state == CARD_STATE_UP) {
1194 card->use_hard_stop = 1;
1195 __qeth_l2_set_offline(card->gdev, 1); 1178 __qeth_l2_set_offline(card->gdev, 1);
1196 } else 1179 } else
1197 __qeth_l2_set_offline(card->gdev, 0); 1180 __qeth_l2_set_offline(card->gdev, 0);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d09b0c44fc3d..142e5f6ef4f3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -510,8 +510,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
510 kfree(tbd_list); 510 kfree(tbd_list);
511} 511}
512 512
513static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, 513static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
514 int recover)
515{ 514{
516 struct qeth_ipaddr *addr, *tmp; 515 struct qeth_ipaddr *addr, *tmp;
517 unsigned long flags; 516 unsigned long flags;
@@ -530,11 +529,6 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
530 addr = list_entry(card->ip_list.next, 529 addr = list_entry(card->ip_list.next,
531 struct qeth_ipaddr, entry); 530 struct qeth_ipaddr, entry);
532 list_del_init(&addr->entry); 531 list_del_init(&addr->entry);
533 if (clean) {
534 spin_unlock_irqrestore(&card->ip_lock, flags);
535 qeth_l3_deregister_addr_entry(card, addr);
536 spin_lock_irqsave(&card->ip_lock, flags);
537 }
538 if (!recover || addr->is_multicast) { 532 if (!recover || addr->is_multicast) {
539 kfree(addr); 533 kfree(addr);
540 continue; 534 continue;
@@ -1611,29 +1605,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
1611 return 0; 1605 return 0;
1612} 1606}
1613 1607
1614static int qeth_l3_put_unique_id(struct qeth_card *card)
1615{
1616
1617 int rc = 0;
1618 struct qeth_cmd_buffer *iob;
1619 struct qeth_ipa_cmd *cmd;
1620
1621 QETH_CARD_TEXT(card, 2, "puniqeid");
1622
1623 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
1624 UNIQUE_ID_NOT_BY_CARD)
1625 return -1;
1626 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
1627 QETH_PROT_IPV6);
1628 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1629 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1630 card->info.unique_id;
1631 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
1632 card->dev->dev_addr, OSA_ADDR_LEN);
1633 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
1634 return rc;
1635}
1636
1637static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, 1608static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
1638 struct qeth_reply *reply, unsigned long data) 1609 struct qeth_reply *reply, unsigned long data)
1639{ 1610{
@@ -2324,25 +2295,14 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2324 dev_close(card->dev); 2295 dev_close(card->dev);
2325 rtnl_unlock(); 2296 rtnl_unlock();
2326 } 2297 }
2327 if (!card->use_hard_stop) {
2328 rc = qeth_send_stoplan(card);
2329 if (rc)
2330 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2331 }
2332 card->state = CARD_STATE_SOFTSETUP; 2298 card->state = CARD_STATE_SOFTSETUP;
2333 } 2299 }
2334 if (card->state == CARD_STATE_SOFTSETUP) { 2300 if (card->state == CARD_STATE_SOFTSETUP) {
2335 qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1); 2301 qeth_l3_clear_ip_list(card, 1);
2336 qeth_clear_ipacmd_list(card); 2302 qeth_clear_ipacmd_list(card);
2337 card->state = CARD_STATE_HARDSETUP; 2303 card->state = CARD_STATE_HARDSETUP;
2338 } 2304 }
2339 if (card->state == CARD_STATE_HARDSETUP) { 2305 if (card->state == CARD_STATE_HARDSETUP) {
2340 if (!card->use_hard_stop &&
2341 (card->info.type != QETH_CARD_TYPE_IQD)) {
2342 rc = qeth_l3_put_unique_id(card);
2343 if (rc)
2344 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2345 }
2346 qeth_qdio_clear_card(card, 0); 2306 qeth_qdio_clear_card(card, 0);
2347 qeth_clear_qdio_buffers(card); 2307 qeth_clear_qdio_buffers(card);
2348 qeth_clear_working_pool_list(card); 2308 qeth_clear_working_pool_list(card);
@@ -2352,7 +2312,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2352 qeth_clear_cmd_buffers(&card->read); 2312 qeth_clear_cmd_buffers(&card->read);
2353 qeth_clear_cmd_buffers(&card->write); 2313 qeth_clear_cmd_buffers(&card->write);
2354 } 2314 }
2355 card->use_hard_stop = 0;
2356 return rc; 2315 return rc;
2357} 2316}
2358 2317
@@ -3433,6 +3392,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3433 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 3392 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
3434 card->dev->dev_id = card->info.unique_id & 3393 card->dev->dev_id = card->info.unique_id &
3435 0xffff; 3394 0xffff;
3395 if (!card->info.guestlan)
3396 card->dev->features |= NETIF_F_GRO;
3436 } 3397 }
3437 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 3398 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
3438 card->dev = alloc_netdev(0, "hsi%d", ether_setup); 3399 card->dev = alloc_netdev(0, "hsi%d", ether_setup);
@@ -3471,6 +3432,9 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3471 card->discipline.output_handler = (qdio_handler_t *) 3432 card->discipline.output_handler = (qdio_handler_t *)
3472 qeth_qdio_output_handler; 3433 qeth_qdio_output_handler;
3473 card->discipline.recover = qeth_l3_recover; 3434 card->discipline.recover = qeth_l3_recover;
3435 if ((card->info.type == QETH_CARD_TYPE_OSD) ||
3436 (card->info.type == QETH_CARD_TYPE_OSX))
3437 card->options.checksum_type = HW_CHECKSUMMING;
3474 return 0; 3438 return 0;
3475} 3439}
3476 3440
@@ -3483,17 +3447,15 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3483 qeth_set_allowed_threads(card, 0, 1); 3447 qeth_set_allowed_threads(card, 0, 1);
3484 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 3448 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
3485 3449
3486 if (cgdev->state == CCWGROUP_ONLINE) { 3450 if (cgdev->state == CCWGROUP_ONLINE)
3487 card->use_hard_stop = 1;
3488 qeth_l3_set_offline(cgdev); 3451 qeth_l3_set_offline(cgdev);
3489 }
3490 3452
3491 if (card->dev) { 3453 if (card->dev) {
3492 unregister_netdev(card->dev); 3454 unregister_netdev(card->dev);
3493 card->dev = NULL; 3455 card->dev = NULL;
3494 } 3456 }
3495 3457
3496 qeth_l3_clear_ip_list(card, 0, 0); 3458 qeth_l3_clear_ip_list(card, 0);
3497 qeth_l3_clear_ipato_list(card); 3459 qeth_l3_clear_ipato_list(card);
3498 return; 3460 return;
3499} 3461}
@@ -3594,7 +3556,6 @@ contin:
3594 mutex_unlock(&card->discipline_mutex); 3556 mutex_unlock(&card->discipline_mutex);
3595 return 0; 3557 return 0;
3596out_remove: 3558out_remove:
3597 card->use_hard_stop = 1;
3598 qeth_l3_stop_card(card, 0); 3559 qeth_l3_stop_card(card, 0);
3599 ccw_device_set_offline(CARD_DDEV(card)); 3560 ccw_device_set_offline(CARD_DDEV(card));
3600 ccw_device_set_offline(CARD_WDEV(card)); 3561 ccw_device_set_offline(CARD_WDEV(card));
@@ -3663,7 +3624,6 @@ static int qeth_l3_recover(void *ptr)
3663 QETH_CARD_TEXT(card, 2, "recover2"); 3624 QETH_CARD_TEXT(card, 2, "recover2");
3664 dev_warn(&card->gdev->dev, 3625 dev_warn(&card->gdev->dev,
3665 "A recovery process has been started for the device\n"); 3626 "A recovery process has been started for the device\n");
3666 card->use_hard_stop = 1;
3667 __qeth_l3_set_offline(card->gdev, 1); 3627 __qeth_l3_set_offline(card->gdev, 1);
3668 rc = __qeth_l3_set_online(card->gdev, 1); 3628 rc = __qeth_l3_set_online(card->gdev, 1);
3669 if (!rc) 3629 if (!rc)
@@ -3684,7 +3644,6 @@ static int qeth_l3_recover(void *ptr)
3684static void qeth_l3_shutdown(struct ccwgroup_device *gdev) 3644static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
3685{ 3645{
3686 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3646 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3687 qeth_l3_clear_ip_list(card, 0, 0);
3688 qeth_qdio_clear_card(card, 0); 3647 qeth_qdio_clear_card(card, 0);
3689 qeth_clear_qdio_buffers(card); 3648 qeth_clear_qdio_buffers(card);
3690} 3649}
@@ -3700,7 +3659,6 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
3700 if (gdev->state == CCWGROUP_OFFLINE) 3659 if (gdev->state == CCWGROUP_OFFLINE)
3701 return 0; 3660 return 0;
3702 if (card->state == CARD_STATE_UP) { 3661 if (card->state == CARD_STATE_UP) {
3703 card->use_hard_stop = 1;
3704 __qeth_l3_set_offline(card->gdev, 1); 3662 __qeth_l3_set_offline(card->gdev, 1);
3705 } else 3663 } else
3706 __qeth_l3_set_offline(card->gdev, 0); 3664 __qeth_l3_set_offline(card->gdev, 0);
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index 5cf4e9831f1b..11dff23f7838 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -1,6 +1,8 @@
1config SCSI_CXGB3_ISCSI 1config SCSI_CXGB3_ISCSI
2 tristate "Chelsio T3 iSCSI support" 2 tristate "Chelsio T3 iSCSI support"
3 depends on CHELSIO_T3_DEPENDS 3 depends on PCI && INET
4 select NETDEVICES
5 select NETDEV_10000
4 select CHELSIO_T3 6 select CHELSIO_T3
5 select SCSI_ISCSI_ATTRS 7 select SCSI_ISCSI_ATTRS
6 ---help--- 8 ---help---
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index bb94b39b17b3..d5302c27f377 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -1,6 +1,8 @@
1config SCSI_CXGB4_ISCSI 1config SCSI_CXGB4_ISCSI
2 tristate "Chelsio T4 iSCSI support" 2 tristate "Chelsio T4 iSCSI support"
3 depends on CHELSIO_T4_DEPENDS 3 depends on PCI && INET
4 select NETDEVICES
5 select NETDEV_10000
4 select CHELSIO_T4 6 select CHELSIO_T4
5 select SCSI_ISCSI_ATTRS 7 select SCSI_ISCSI_ATTRS
6 ---help--- 8 ---help---
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d2ad3d676724..a24dff9f9163 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -451,26 +451,13 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
451} 451}
452 452
453static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr, 453static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
454 __be16 sport, __be16 dport, u8 tos) 454 __be16 sport, __be16 dport, u8 tos)
455{ 455{
456 struct rtable *rt; 456 struct rtable *rt;
457 struct flowi fl = {
458 .oif = 0,
459 .nl_u = {
460 .ip4_u = {
461 .daddr = daddr,
462 .saddr = saddr,
463 .tos = tos }
464 },
465 .proto = IPPROTO_TCP,
466 .uli_u = {
467 .ports = {
468 .sport = sport,
469 .dport = dport }
470 }
471 };
472 457
473 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) 458 rt = ip_route_output_ports(&init_net, NULL, daddr, saddr,
459 dport, sport, IPPROTO_TCP, tos, 0);
460 if (IS_ERR(rt))
474 return NULL; 461 return NULL;
475 462
476 return rt; 463 return rt;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9f9600b67001..3becc6a20a4f 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -285,9 +285,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
285 } 285 }
286 286
287 /* Do not support for bonding device */ 287 /* Do not support for bonding device */
288 if ((netdev->priv_flags & IFF_MASTER_ALB) || 288 if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
289 (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
290 (netdev->priv_flags & IFF_MASTER_8023AD)) {
291 FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n"); 289 FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
292 return -EOPNOTSUPP; 290 return -EOPNOTSUPP;
293 } 291 }
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 3918d2cc5856..e05ba6eefc7e 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -1192,10 +1192,10 @@ void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags)
1192} 1192}
1193EXPORT_SYMBOL(ssb_device_enable); 1193EXPORT_SYMBOL(ssb_device_enable);
1194 1194
1195/* Wait for a bit in a register to get set or unset. 1195/* Wait for bitmask in a register to get set or cleared.
1196 * timeout is in units of ten-microseconds */ 1196 * timeout is in units of ten-microseconds */
1197static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask, 1197static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
1198 int timeout, int set) 1198 int timeout, int set)
1199{ 1199{
1200 int i; 1200 int i;
1201 u32 val; 1201 u32 val;
@@ -1203,7 +1203,7 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
1203 for (i = 0; i < timeout; i++) { 1203 for (i = 0; i < timeout; i++) {
1204 val = ssb_read32(dev, reg); 1204 val = ssb_read32(dev, reg);
1205 if (set) { 1205 if (set) {
1206 if (val & bitmask) 1206 if ((val & bitmask) == bitmask)
1207 return 0; 1207 return 0;
1208 } else { 1208 } else {
1209 if (!(val & bitmask)) 1209 if (!(val & bitmask))
@@ -1220,20 +1220,38 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
1220 1220
1221void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags) 1221void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags)
1222{ 1222{
1223 u32 reject; 1223 u32 reject, val;
1224 1224
1225 if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET) 1225 if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET)
1226 return; 1226 return;
1227 1227
1228 reject = ssb_tmslow_reject_bitmask(dev); 1228 reject = ssb_tmslow_reject_bitmask(dev);
1229 ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK); 1229
1230 ssb_wait_bit(dev, SSB_TMSLOW, reject, 1000, 1); 1230 if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_CLOCK) {
1231 ssb_wait_bit(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0); 1231 ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK);
1232 ssb_write32(dev, SSB_TMSLOW, 1232 ssb_wait_bits(dev, SSB_TMSLOW, reject, 1000, 1);
1233 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | 1233 ssb_wait_bits(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0);
1234 reject | SSB_TMSLOW_RESET | 1234
1235 core_specific_flags); 1235 if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
1236 ssb_flush_tmslow(dev); 1236 val = ssb_read32(dev, SSB_IMSTATE);
1237 val |= SSB_IMSTATE_REJECT;
1238 ssb_write32(dev, SSB_IMSTATE, val);
1239 ssb_wait_bits(dev, SSB_IMSTATE, SSB_IMSTATE_BUSY, 1000,
1240 0);
1241 }
1242
1243 ssb_write32(dev, SSB_TMSLOW,
1244 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
1245 reject | SSB_TMSLOW_RESET |
1246 core_specific_flags);
1247 ssb_flush_tmslow(dev);
1248
1249 if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
1250 val = ssb_read32(dev, SSB_IMSTATE);
1251 val &= ~SSB_IMSTATE_REJECT;
1252 ssb_write32(dev, SSB_IMSTATE, val);
1253 }
1254 }
1237 1255
1238 ssb_write32(dev, SSB_TMSLOW, 1256 ssb_write32(dev, SSB_TMSLOW,
1239 reject | SSB_TMSLOW_RESET | 1257 reject | SSB_TMSLOW_RESET |
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 158449e55044..a467b20baac8 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -468,10 +468,14 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
468 SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0); 468 SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
469 SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0); 469 SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
470 SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0); 470 SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
471 SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0);
472 SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0);
471 } else { 473 } else {
472 SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0); 474 SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
473 SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0); 475 SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
474 SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0); 476 SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
477 SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0);
478 SPEX(boardflags2_hi, SSB_SPROM5_BFL2HI, 0xFFFF, 0);
475 } 479 }
476 SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A, 480 SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A,
477 SSB_SPROM4_ANTAVAIL_A_SHIFT); 481 SSB_SPROM4_ANTAVAIL_A_SHIFT);
@@ -641,7 +645,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
641 break; 645 break;
642 default: 646 default:
643 ssb_printk(KERN_WARNING PFX "Unsupported SPROM" 647 ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
644 " revision %d detected. Will extract" 648 " revision %d detected. Will extract"
645 " v1\n", out->revision); 649 " v1\n", out->revision);
646 out->revision = 1; 650 out->revision = 1;
647 sprom_extract_r123(out, in); 651 sprom_extract_r123(out, in);
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 9e74beb0b64b..555b056b49b1 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -2308,7 +2308,9 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
2308 notif_bss_info->frame_len = 2308 notif_bss_info->frame_len =
2309 offsetof(struct ieee80211_mgmt, 2309 offsetof(struct ieee80211_mgmt,
2310 u.beacon.variable) + wl_get_ielen(wl); 2310 u.beacon.variable) + wl_get_ielen(wl);
2311 freq = ieee80211_channel_to_frequency(notif_bss_info->channel); 2311 freq = ieee80211_channel_to_frequency(notif_bss_info->channel,
2312 band->band);
2313
2312 channel = ieee80211_get_channel(wiphy, freq); 2314 channel = ieee80211_get_channel(wiphy, freq);
2313 2315
2314 WL_DBG("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n", 2316 WL_DBG("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n",
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c b/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c
index 66708d8df7f6..774b4e916b29 100644
--- a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c
+++ b/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c
@@ -74,9 +74,6 @@ static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev);
74static void wl_release_fw(struct wl_info *wl); 74static void wl_release_fw(struct wl_info *wl);
75 75
76/* local prototypes */ 76/* local prototypes */
77static int wl_start(struct sk_buff *skb, struct wl_info *wl);
78static int wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw,
79 struct sk_buff *skb);
80static void wl_dpc(unsigned long data); 77static void wl_dpc(unsigned long data);
81static irqreturn_t wl_isr(int irq, void *dev_id); 78static irqreturn_t wl_isr(int irq, void *dev_id);
82 79
@@ -111,7 +108,6 @@ module_param(phymsglevel, int, 0);
111#define WL_TO_HW(wl) (wl->pub->ieee_hw) 108#define WL_TO_HW(wl) (wl->pub->ieee_hw)
112 109
113/* MAC80211 callback functions */ 110/* MAC80211 callback functions */
114static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
115static int wl_ops_start(struct ieee80211_hw *hw); 111static int wl_ops_start(struct ieee80211_hw *hw);
116static void wl_ops_stop(struct ieee80211_hw *hw); 112static void wl_ops_stop(struct ieee80211_hw *hw);
117static int wl_ops_add_interface(struct ieee80211_hw *hw, 113static int wl_ops_add_interface(struct ieee80211_hw *hw,
@@ -152,21 +148,19 @@ static int wl_ops_ampdu_action(struct ieee80211_hw *hw,
152 u8 buf_size); 148 u8 buf_size);
153static void wl_ops_rfkill_poll(struct ieee80211_hw *hw); 149static void wl_ops_rfkill_poll(struct ieee80211_hw *hw);
154 150
155static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 151static void wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
156{ 152{
157 int status;
158 struct wl_info *wl = hw->priv; 153 struct wl_info *wl = hw->priv;
159 154
160 WL_LOCK(wl); 155 WL_LOCK(wl);
161 if (!wl->pub->up) { 156 if (!wl->pub->up) {
162 WL_ERROR("ops->tx called while down\n"); 157 WL_ERROR("ops->tx called while down\n");
163 status = -ENETDOWN; 158 kfree_skb(skb);
164 goto done; 159 goto done;
165 } 160 }
166 status = wl_start(skb, wl); 161 wlc_sendpkt_mac80211(wl->wlc, skb, hw);
167 done: 162 done:
168 WL_UNLOCK(wl); 163 WL_UNLOCK(wl);
169 return status;
170} 164}
171 165
172static int wl_ops_start(struct ieee80211_hw *hw) 166static int wl_ops_start(struct ieee80211_hw *hw)
@@ -1396,25 +1390,6 @@ static void wl_free(struct wl_info *wl)
1396} 1390}
1397 1391
1398/* 1392/*
1399 * transmit a packet
1400 * precondition: perimeter lock has been acquired
1401 */
1402static int BCMFASTPATH wl_start(struct sk_buff *skb, struct wl_info *wl)
1403{
1404 if (!wl)
1405 return -ENETDOWN;
1406
1407 return wl_start_int(wl, WL_TO_HW(wl), skb);
1408}
1409
1410static int BCMFASTPATH
1411wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
1412{
1413 wlc_sendpkt_mac80211(wl->wlc, skb, hw);
1414 return NETDEV_TX_OK;
1415}
1416
1417/*
1418 * precondition: perimeter lock has been acquired 1393 * precondition: perimeter lock has been acquired
1419 */ 1394 */
1420void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state, 1395void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_main.c b/drivers/staging/brcm80211/brcmsmac/wlc_main.c
index 0870dc913cda..639b5d7c9603 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_main.c
+++ b/drivers/staging/brcm80211/brcmsmac/wlc_main.c
@@ -6838,11 +6838,14 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
6838 ratespec_t rspec; 6838 ratespec_t rspec;
6839 unsigned char *plcp; 6839 unsigned char *plcp;
6840 6840
6841#if 0
6842 /* Clearly, this is bogus -- reading the TSF now is wrong */
6841 wlc_read_tsf(wlc, &tsf_l, &tsf_h); /* mactime */ 6843 wlc_read_tsf(wlc, &tsf_l, &tsf_h); /* mactime */
6842 rx_status->mactime = tsf_h; 6844 rx_status->mactime = tsf_h;
6843 rx_status->mactime <<= 32; 6845 rx_status->mactime <<= 32;
6844 rx_status->mactime |= tsf_l; 6846 rx_status->mactime |= tsf_l;
6845 rx_status->flag |= RX_FLAG_TSFT; 6847 rx_status->flag |= RX_FLAG_MACTIME_MPDU; /* clearly wrong */
6848#endif
6846 6849
6847 channel = WLC_CHAN_CHANNEL(rxh->RxChan); 6850 channel = WLC_CHAN_CHANNEL(rxh->RxChan);
6848 6851
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
index a9a3e25a7efa..b6c42cb0d1c6 100644
--- a/drivers/staging/pohmelfs/config.c
+++ b/drivers/staging/pohmelfs/config.c
@@ -525,7 +525,7 @@ static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *n
525{ 525{
526 int err; 526 int err;
527 527
528 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) 528 if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
529 return; 529 return;
530 530
531 switch (msg->flags) { 531 switch (msg->flags) {
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 2163d60c2eaf..3724e1e67ec2 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -118,13 +118,14 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
118 *total_flags = new_flags; 118 *total_flags = new_flags;
119} 119}
120 120
121static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 121static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
122{ 122{
123 struct wbsoft_priv *priv = dev->priv; 123 struct wbsoft_priv *priv = dev->priv;
124 124
125 if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) { 125 if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) {
126 priv->sMlmeFrame.wNumTxMMPDUDiscarded++; 126 priv->sMlmeFrame.wNumTxMMPDUDiscarded++;
127 return NETDEV_TX_BUSY; 127 kfree_skb(skb);
128 return;
128 } 129 }
129 130
130 priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME; 131 priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME;
@@ -140,8 +141,6 @@ static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
140 */ 141 */
141 142
142 Mds_Tx(priv); 143 Mds_Tx(priv);
143
144 return NETDEV_TX_OK;
145} 144}
146 145
147static int wbsoft_start(struct ieee80211_hw *dev) 146static int wbsoft_start(struct ieee80211_hw *dev)
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 52ec0959d462..5180a215d781 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
73 struct uvesafb_task *utask; 73 struct uvesafb_task *utask;
74 struct uvesafb_ktask *task; 74 struct uvesafb_ktask *task;
75 75
76 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) 76 if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
77 return; 77 return;
78 78
79 if (msg->seq >= UVESAFB_TASKS_MAX) 79 if (msg->seq >= UVESAFB_TASKS_MAX)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0ad1699a1b3e..65f5068afd84 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -794,6 +794,21 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
794 return irq; 794 return irq;
795} 795}
796 796
797static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
798 unsigned int remote_port)
799{
800 struct evtchn_bind_interdomain bind_interdomain;
801 int err;
802
803 bind_interdomain.remote_dom = remote_domain;
804 bind_interdomain.remote_port = remote_port;
805
806 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
807 &bind_interdomain);
808
809 return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
810}
811
797 812
798int bind_virq_to_irq(unsigned int virq, unsigned int cpu) 813int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
799{ 814{
@@ -889,6 +904,29 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
889} 904}
890EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); 905EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
891 906
907int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
908 unsigned int remote_port,
909 irq_handler_t handler,
910 unsigned long irqflags,
911 const char *devname,
912 void *dev_id)
913{
914 int irq, retval;
915
916 irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
917 if (irq < 0)
918 return irq;
919
920 retval = request_irq(irq, handler, irqflags, devname, dev_id);
921 if (retval != 0) {
922 unbind_from_irq(irq);
923 return retval;
924 }
925
926 return irq;
927}
928EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
929
892int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, 930int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
893 irq_handler_t handler, 931 irq_handler_t handler,
894 unsigned long irqflags, const char *devname, void *dev_id) 932 unsigned long irqflags, const char *devname, void *dev_id)